diff --git a/.cursor/rules/.gitkeep b/.cursor/rules/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/.cursor/rules/coding_conventions.mdc b/.cursor/rules/coding_conventions.mdc new file mode 100644 index 00000000000..80c9a9597a4 --- /dev/null +++ b/.cursor/rules/coding_conventions.mdc @@ -0,0 +1,317 @@ +--- +description: Coding Conventions +globs: *.go +alwaysApply: false +--- +# Coding Conventions + +## High-Assurance Software Engineering Principles + +Flow is a high-assurance software project where the cost of bugs that slip through can be catastrophically high. We consider all inputs to be potentially byzantine. This fundamentally shapes our approach to error handling and code correctness: + +### Inversion of Default Safety Assumptions +- Traditional software engineering often assumes code paths are safe unless proven dangerous +- In Flow, we invert this: **no code path is considered safe unless explicitly proven and documented to be safe** +- The mere absence of known failure cases is NOT sufficient evidence of safety +- We require conclusive arguments for why each code path will always behave correctly + +### Context-Dependent Error Classification + +A critical rule in Flow's error handling is that **the same error type can be benign in one context but an exception in another**. Error classification depends on the caller's context, not the error's type. + +Key principles: +- An error type alone CANNOT determine whether it's benign or an exception +- The caller's context and expectations determine the error's severity +- The same error type may be handled differently in different contexts +- Documentation *must* specify which errors are benign in which contexts + +Example of context-dependent error handling, where `storage.ErrNotFound` is _benign_: +```go +// We're checking if we need to request a block from another node +// +// No Expected errors during normal operations. +func (s *Synchronizer) checkBlockExists(blockID flow.Identifier) error { + _, err := s.storage.ByBlockID(blockID) + if errors.Is(err, storage.ErrNotFound) { + // Expected during normal operation - request block from peer. + return s.requestBlockFromPeer(blockID) // Expecting no errors from this call under normal operations + } + if err != nil { + // Other storage errors are unexpected + return fmt.Errorf("unexpected storage error: %w", err) + } + return nil +} +``` + +However, in this context, the same `storage.ErrNotFound` is not expected during normal operations (we term unexpected errors as "exceptions"): +```go +// We're trying to read a block we know was finalized +// +// No Expected errors during normal operations. +func (s *State) GetFinalizedBlock(height uint64) (*flow.Block, error) { + blockID, err := s.storage.FinalizedBlockID(height) + if err != nil { + return nil, fmt.Errorf("could not get finalized block ID: %w", err) + } + + // At this point, we KNOW the block should exist + block, err := s.storage.ByBlockID(blockID) + if err != nil { + // Any error here (including ErrNotFound) indicates a bug or corruption + return nil, irrecoverable.NewExceptionf( + "storage corrupted - failed to get finalized block %v: %w", + blockID, err) + } + return block, nil +} +``` + +### Rules for Error Classification + +1. **Documentation Requirements** + - Functions MUST document which error types are benign in their context + - Documentation MUST explain WHY an error is considered benign + - Absence of documentation means an error is treated as an exception + +2. 
**Error Propagation**
+   - When propagating errors, evaluate whether they remain benign in the new context
+   - If a benign error from a lower layer indicates a critical failure in your context, wrap it as an exception
+   - Use `irrecoverable.NewExceptionf` when elevating a benign error to an exception
+
+3. **Testing Requirements**
+   - Tests MUST verify error handling in different contexts
+   - Test that benign errors in one context are properly elevated to exceptions in another
+   - Mock dependencies to test both benign and exceptional paths (see the test sketch at the end of section 3.a below)
+
+### Error Handling Philosophy
+- All errors are considered potentially fatal by default
+- Only explicitly documented benign errors are safe to recover from
+- For any undocumented error case, we must assume the execution state is corrupted
+- Recovery from undocumented errors requires a node restart from the last known safe state
+- This conservative approach prioritizes safety over continuous operation
+
+Example of proper high-assurance error handling:
+```go
+func (e *engine) process(event interface{}) error {
+    // Step 1: type checking of input
+    switch v := event.(type) {
+    case *ValidEvent:
+        // explicitly documented safe path
+        return e.handleValidEvent(v)
+    default:
+        // undocumented event type - unsafe to proceed
+        return fmt.Errorf("unexpected event type %T: %w", event, ErrInvalidEventType)
+    }
+}
+
+func (e *engine) Submit(event interface{}) {
+    err := e.process(event)
+    if errors.Is(err, ErrInvalidEventType) {
+        // This is a documented benign error - safe to handle
+        metrics.InvalidEventsCounter.Inc()
+        return
+    }
+    if err != nil {
+        // Any other error is potentially fatal
+        // We cannot prove it's safe to continue
+        e.log.Fatal().Err(err).Msg("potentially corrupted state - must restart")
+        return
+    }
+}
+```
+
+## 1. Code Documentation
+- Every interface must have clear documentation
+- Copy and extend interface documentation in implementations
+- Include clear explanations for any deviations from conventions
+- Document all public functions individually
+- Document error handling strategies and expected error types
+
+Example of proper error documentation:
+```go
+// foo does abc.
+// Expected errors during normal operations:
+//   - ErrXFailed: if x failed
+func foo() error {
+    ...
+    return fmt.Errorf("details about failure: %w", ErrXFailed)
+}
+```
+
+## 2. Code Structure
+- Follow the component-based architecture
+- Each component must implement the `Component` interface (a minimal sketch is shown after section 5)
+- Clearly differentiate between trusted (internal) and untrusted (external) inputs
+- Components should have dedicated worker pools
+- Proper resource management with worker limits
+- Proper state management and recovery
+
+## 3. Error Categories and Handling Philosophy
+
+### a. Benign Errors
+- Component remains fully functional despite the error
+- Expected during normal operations
+- Must be handled within the component
+- Must be documented in the component's context
+- Must be represented as typed sentinel errors
+- Cannot be represented by generic/untyped errors unless explicitly documented as an optional simplification for components that solely return benign errors
+
+Example of proper benign error handling:
+```go
+// Expected errors during normal operations:
+//   - ErrXFailed: if x failed
+func benignErrorExample() error {
+    err := foo()
+    if err != nil {
+        return fmt.Errorf("failed to do foo: %w", err)
+    }
+    return nil
+}
+```
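+To make the testing requirements above concrete, here is a minimal test sketch for the benign-error pattern. It assumes the `testify` require package; `ErrXFailed`, `doFoo`, and the package name are hypothetical stand-ins rather than existing flow-go code:
+```go
+package example
+
+import (
+    "errors"
+    "fmt"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+// ErrXFailed is the documented benign sentinel error.
+var ErrXFailed = errors.New("x failed")
+
+// doFoo wraps the sentinel with context, as the conventions above require.
+func doFoo() error {
+    return fmt.Errorf("failed to do foo: %w", ErrXFailed)
+}
+
+// TestDoFoo_DocumentedSentinel verifies that the documented benign error survives
+// wrapping, and that an unrelated error is not misinterpreted as benign.
+func TestDoFoo_DocumentedSentinel(t *testing.T) {
+    require.ErrorIs(t, doFoo(), ErrXFailed)
+    require.False(t, errors.Is(fmt.Errorf("disk corrupted"), ErrXFailed))
+}
+```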
+### b. Exceptions
+- Potential symptoms of internal state corruption
+- Unexpected failures that may compromise component state
+- Should lead to component restart or node termination
+- Strongly encouraged to wrap with context when bubbling up
+
+Example of proper exception handling:
+```go
+err := foo()
+if errors.Is(err, ErrXFailed) {
+    // expected error
+    return
+}
+if err != nil {
+    log.Fatal().Err(err).Msg("unexpected internal error")
+    return
+}
+```
+
+### c. Sentinel Error Requirements
+- Must be properly typed
+- Must be documented in GoDoc
+- Must avoid generic error formats
+- Must always wrap with context when bubbling up the call stack
+- Must document all expected error types
+- Must handle at the appropriate level where context is available
+- Must use proper error wrapping for stack traces
+
+Example of proper sentinel error definition and usage:
+```go
+var ErrXFailed = errors.New("x failed")
+
+// bar does ...
+// Expected error returns during normal operations:
+//   - ErrXFailed: if x failed
+func bar() error {
+    ...
+    err := foo()
+    if err != nil {
+        return fmt.Errorf("failed to do foo: %w", err)
+    }
+    ...
+}
+```
+
+## 4. Additional Best Practices
+- Prioritize safety over liveness
+- Don't continue on a "best-effort" basis when encountering unexpected errors
+- Testing Error Handling:
+  - Test both benign error cases and exceptions
+  - Must verify that documented sentinel errors are returned in their specified situations
+  - Must verify that unexpected errors (exceptions) from lower layers or their mocks are not misinterpreted as benign errors
+  - Verify proper error propagation
+  - Test component recovery from errors
+  - Validate error handling in both trusted and untrusted contexts
+
+Example of proper error handling in components:
+```go
+func (e *engine) process(event interface{}) error {
+    switch v := event.(type) {
+    ...
+    default:
+        return fmt.Errorf("invalid input type %T: %w", event, InvalidMessageType)
+    }
+}
+
+func (e *engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error {
+    err := e.process(event)
+    if err != nil {
+        if errors.Is(err, InvalidMessageType) {
+            // this is EXPECTED during normal operations: drop the invalid external input
+            return nil
+        }
+        // this is unexpected during normal operations
+        e.log.Fatal().Err(err).Msg("unexpected internal error")
+    }
+    return nil
+}
+
+func (e *engine) ProcessLocal(event interface{}) {
+    err := e.process(event)
+    if err != nil {
+        if errors.Is(err, InvalidMessageType) {
+            // this is a CRITICAL BUG: the invalid input originates from our own node
+        }
+        // this is unexpected during normal operations
+        e.log.Fatal().Err(err).Msg("unexpected internal error")
+    }
+}
+```
+
+## 5. Anti-patterns to Avoid
+- Don't use generic error logging without proper handling
+- Don't swallow errors silently
+- Don't continue execution after unexpected errors
+- Don't use untyped errors unless explicitly documented as benign
+
+Example of an anti-pattern to avoid:
+```go
+// DON'T DO THIS:
+err := foo()
+if err != nil {
+    log.Error().Err(err).Msg("foo failed")
+    return
+}
+```
+
+Instead, implement proper error handling:
+```go
+func (e *engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) {
+    e.unit.Launch(func() {
+        err := e.process(event)
+        if errors.Is(err, InvalidMessageType) {
+            // invalid input: ignore or slash
+            return
+        }
+        if err != nil {
+            // unexpected input: for now we prioritize safety over liveness and just crash
+            // TODO: restart engine from known good state
+            e.log.Fatal().Err(err).Msg("unexpected internal error")
+        }
+    })
+}
+```
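+The component and worker-pool conventions from section 2 can be illustrated with a minimal, self-contained sketch. The `Component` interface, `engine`, and `newEngine` below are simplified, hypothetical stand-ins (the real flow-go component interface differs); the worker pool uses only the standard library:
+```go
+package example
+
+import "sync"
+
+// Component is a simplified stand-in for the project's component interface.
+type Component interface {
+    Start()
+    Done() <-chan struct{}
+}
+
+// engine processes untrusted events with a dedicated, bounded worker pool.
+type engine struct {
+    events chan interface{} // inbound, potentially byzantine events
+    done   chan struct{}
+    wg     sync.WaitGroup
+}
+
+var _ Component = (*engine)(nil)
+
+func newEngine(queueSize int) *engine {
+    return &engine{
+        events: make(chan interface{}, queueSize),
+        done:   make(chan struct{}),
+    }
+}
+
+// Start launches a fixed number of workers, bounding resource usage.
+func (e *engine) Start() {
+    const workers = 4
+    for i := 0; i < workers; i++ {
+        e.wg.Add(1)
+        go func() {
+            defer e.wg.Done()
+            for event := range e.events {
+                // handle the event; undocumented errors must escalate,
+                // mirroring the Submit examples above
+                _ = event
+            }
+        }()
+    }
+    go func() {
+        e.wg.Wait()
+        close(e.done)
+    }()
+}
+
+// Done closes once all workers have exited (after e.events is closed).
+func (e *engine) Done() <-chan struct{} { return e.done }
+```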
+## 6. Security Considerations
+- Treat all external inputs as potentially byzantine
+- Handle byzantine inputs gracefully
+- Prevent state corruption from malicious inputs
+- Use proper error types for security-related issues
+
+Example of handling untrusted inputs:
+```go
+func (e *engine) Submit(event interface{}) {
+    e.unit.Launch(func() {
+        err := e.process(event)
+        if errors.Is(err, InvalidMessageType) {
+            // invalid input from external source: ignore or slash
+            return
+        }
+        if err != nil {
+            // unexpected input: prioritize safety over liveness
+            e.log.Fatal().Err(err).Msg("unexpected internal error")
+        }
+    })
+}
+```
diff --git a/.cursor/rules/core.mdc b/.cursor/rules/core.mdc
new file mode 100644
index 00000000000..70425f0480c
--- /dev/null
+++ b/.cursor/rules/core.mdc
@@ -0,0 +1,45 @@
+---
+description: Cursor Operational Doctrine
+globs:
+alwaysApply: true
+---
+# Cursor Operational Doctrine
+
+You are an AI with extensive expertise in byzantine-fault-tolerant, distributed software engineering. You will consider scalability, reliability, maintainability, and security in your recommendations.
+
+You are working in a pair-programming setting with a senior engineer. Their time is valuable, so work time-efficiently. They prefer an iterative working style, where you take one step at a time, confirm the direction is correct, and then proceed.
+Critically reflect on your work. Ask if you are not sure. Avoid confirmation bias - speak up (with short, concise reasoning followed by tangible suggestions) if something should be changed or approached differently in your opinion.
+
+## Primary directive
+
+Your peer's instructions, questions, and requests **always** take precedence over any general rules (such as the ones below).
+
+## Interactions with your peer
+- Never use apologies.
+- Acknowledge if you misunderstood something, and concisely summarize what you have learned.
+- Only when explicitly requested, provide feedback about your understanding of comments, documentation, or code.
+- Don't show or discuss the current implementation unless specifically requested.
+- State which files have been modified and very briefly in which regard. But don't provide excerpts of changes made.
+- Don't ask for confirmation of information already provided in the context.
+- Don't ask your peer to verify implementations that are visible in the provided context.
+- Always provide links to the real files, not just the file names such as x.md.
+
+## Verify Information
+- Always verify information before presenting it. Do not make assumptions or speculate without clear evidence.
+- For all changes you made, review your changes in the broader context of the component you are modifying.
+  - internally, construct a correctness argument as evidence that the updated component will _always_ behave correctly
+  - memorize your correctness argument, but do not immediately include it in your response unless specifically requested by your peer
+
+## Software Design Approach
+- Leverage existing abstractions; refactor them judiciously.
+- Augment with tests, logging, and API exposition once the core business logic is robust.
+- Ensure new packages are modular, orthogonal, and future-proof.
+
+## No Inventions
+Don't invent changes other than what's explicitly requested.
+
+## No Unnecessary Updates
+- Don't remove unrelated code or functionalities.
+- Don't suggest updates or changes to files when there are no actual modifications needed.
+- Don't suggest whitespace changes.
+ diff --git a/.cursor/rules/godocs.mdc b/.cursor/rules/godocs.mdc new file mode 100644 index 00000000000..5606bebd59f --- /dev/null +++ b/.cursor/rules/godocs.mdc @@ -0,0 +1,298 @@ +--- +description: +globs: *.go +alwaysApply: false +--- +# Go Documentation Rule + +## General Guidance + +- Add godocs comments for all types, variables, constants, functions, and interfaces. +- Begin with the name of the entity. +- Use complete sentences. +- **ALL** methods that return an error **MUST** document expected error conditions! +- When updating existing code, if godocs exist, keep the existing content and improve formating/expand with additional details to conform with these rules. +- If any details are unclear, **DO NOT make something up**. Add a TODO to fill in the missing details or ask the user for clarification. + +## Method Rules +```go +// MethodName performs a specific action or returns specific information. +// +// Returns: (only if additional interpretation of return values is needed beyond the method / function signature) +// - return1: description of non-obvious aspects +// - return2: description of non-obvious aspects +// +// Expected errors during normal operations: +// - ErrType1: when and why this error occurs +// - ErrType2: when and why this error occurs +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +// +// Safe for concurrent access (default, may be omitted) +// CAUTION: not concurrency safe! (if applicable, documentation is obligatory) +``` + +### Method Description + - First line must be a complete sentence describing what the method does + - Use present tense + - Start with the method name + - End with a period + - Prefer a concise description that naturally incorporates the meaning of parameters + - Example: + ```go + // ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks. + // Error returns: + // - ErrNotFound if no block header with the given ID exists + ByBlockID(blockID flow.Identifier) (*flow.Header, error) + ``` + +### Parameters + - Only document parameters separately when they have non-obvious aspects: + - Complex constraints or requirements + - Special relationships with other parameters + - Formatting or validation rules + - Example: + ```go + // ValidateTransaction validates the transaction against the current state. + // + // Parameters: + // - script: must be valid BPL-encoded script with max size of 64KB + // - accounts: must contain at least one account with signing capability + ``` + +### Returns + - Only document return values if there is **additional information** necessary to interpret the function's or method's return values, which is not apparent from the method signature's return values + - When documenting non-error returns, be concise and focus only on non-obvious aspects: + ```go + // Example 1 - No return docs needed (self-explanatory): + // GetHeight returns the block's height. + + // Example 2 - Additional context needed: + // GetPipeline returns the execution pipeline, or nil if not configured. + + // Example 3 - Complex return value needs explanation: + // GetBlockStatus returns the block's current status. 
+ // Returns: + // - status: PENDING if still processing, FINALIZED if complete, INVALID if failed validation + ``` + - Error returns documentation is mandatory (see section `Error Returns` below) + + +### Error Documentation + - Error classification is context-dependent - the same error type can be benign in one context but an exception in another + - **ALL** methods that return an error **MUST** document exhaustively all benign errors that can be returned (if there are any) + - Error documentation should be the last part of a method's or function's documentation + - Only document benign errors that are expected during normal operations + - Exceptions (unexpected errors) are not individually documented in the error section. Instead, we include the catch-all statement: `All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)` + + Before documenting any error, verify: + - [ ] The error type exists in the codebase (for sentinel errors) + - [ ] The error is actually returned by the method + - [ ] The error handling matches the documented behavior + - [ ] The error is benign in this specific context + - [ ] If wrapping a sentinel error with fmt.Errorf, document the original sentinel error type + - [ ] The error documentation follows the standard format + + Error documentation must follow this format: + ```go + // Expected errors during normal operations: + // - ErrTypeName: when and why this error occurs (for sentinel errors) + // - ErrWrapped: when wrapped via fmt.Errorf, document the original sentinel error + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + ``` + + For methods where all errors are exceptions: + ```go + // No errors are expected during normal operation. + ``` + + Common mistakes to avoid: + - Don't document errors that aren't returned + - Don't document generic fmt.Errorf errors unless they wrap a sentinel error + - Don't document exceptions (unexpected errors that may indicate bugs) + - Don't mix benign and exceptional errors without clear distinction + - Don't omit the catch-all statement about other errors + - Don't document implementation details that might change + + Examples: + ```go + // Example 1: Method with sentinel errors + // GetBlock returns the block with the given ID. + // Expected errors during normal operations: + // - ErrNotFound: when the block doesn't exist + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + + // Example 2: Method wrapping a sentinel error + // ValidateTransaction validates the transaction against the current state. + // Expected errors during normal operations: + // - ErrInvalidSignature: when the transaction signature is invalid (wrapped) + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + + // Example 3: Method with only exceptional errors + // ProcessFinalizedBlock processes a block that is known to be finalized. + // No errors are expected during normal operation. + + // Example 4: Method with context-dependent error handling + // ByBlockID returns the block with the given ID. 
+ // Expected errors during normal operations: + // - ErrNotFound: when requesting non-finalized blocks that don't exist + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + // Note: ErrNotFound is NOT expected when requesting finalized blocks + ``` + +### Concurrency Safety + - By default, we assume methods and functions to be concurrency safe. + - Every struct and interface must explicitly state whether it is safe for concurrent access + - If not thread-safe, explain why + - For methods or functions that are not concurrency safe (deviating from the default), it is **mandatory** to diligently document this by including the following call-out: + ```go + // CAUTION: not concurrency safe! + ``` + - If **all methods** of a struct or interface are thread-safe, only document this in the struct's or interface's godoc and mention that all methods are thread-safe. Do not include the line in each method: + ```go + // Safe for concurrent access + ``` + +### Special Cases + - For getters/setters, use simplified format: + ```go + // GetterName returns the value of the field. + // Returns: + // - value: description of the returned value + ``` + - For constructors, use: + ```go + // NewTypeName creates a new instance of TypeName. + // Parameters: + // - param1: description of param1 + // Returns: + // - *TypeName: the newly created instance + // - error: any error that occurred during creation + ``` + +### Private Methods + - Private methods should still be documented + - Can use more technical language + - Focus on implementation details + - Must include error documention for any method that returns an error + +## Examples + +### Standard Method Example +```go +// AddReceipt adds the given execution receipt to the container and associates it with the block. +// Returns true if the receipt was added, false if it already existed. Safe for concurrent access. +// +// Expected errors during normal operations: +// - ErrInvalidReceipt: when the receipt is malformed +// - ErrDuplicateReceipt: when the receipt already exists +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +``` + +### Getter Method Example +```go +// Pipeline returns the pipeline associated with this execution result container. +// Returns nil if no pipeline is set. Safe for concurrent access +``` + +### Constructor Example +```go +// NewExecutionResultContainer creates a new instance of ExecutionResultContainer with the given result and pipeline. +// +// Expected Errors: +// - ErrInvalidBlock: when the block ID doesn't match the result's block ID +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +``` + +## Interface Documentation +1. **Interface Description** + - Start with the interface name + - Describe the purpose and behavior of the interface + - Explain any invariants or guarantees the interface provides + - Explicitly state whether it is safe for concurrent access + - Example: + ```go + // Executor defines the interface for executing transactions. + // Implementations must guarantee thread-safety and handle byzantine inputs gracefully. + type Executor interface { + // ... methods ... + } + ``` + +2. 
**Interface Methods** + - Document each method in the interface + - Focus on the contract/behavior rather than implementation details + - Include error documentation for methods that return errors + - Ensure that the interface documentation is consistent with the structs' documentations implementing this interface + - Every sentinel error that can be returned by any of the implementations must also be documented by the interface. + - Example: + ```go + // Execute processes the given transaction and returns its execution result. + // The method must be idempotent and handle byzantine inputs gracefully. + // + // Expected Errors: + // - ErrInvalidTransaction: when the transaction is malformed + // - ErrExecutionFailed: when the transaction execution fails + Execute(tx *Transaction) (*Result, error) + ``` + +## Constants and Variables +1. **Constants** + - Document the purpose and usage of each constant + - Include any constraints or invariants + - Example: + ```go + // MaxBlockSize defines the maximum size of a block in bytes. + // This value must be a power of 2 and cannot be changed after initialization. + const MaxBlockSize = 1024 * 1024 + ``` + +2. **Variables** + - Document the purpose and lifecycle of each variable + - Include any thread-safety considerations + - Example: + ```go + // defaultConfig holds the default configuration for the system. + // This variable is read-only after initialization and safe for concurrent access. + var defaultConfig = &Config{ + // ... fields ... + } + ``` + +## Type Documentation +1. **Type Description** + - Start with the type name + - Describe the purpose and behavior of the type + - Include any invariants or guarantees + - Example: + ```go + // Block represents a block in the Flow blockchain. + // Blocks are immutable once created and contain a list of transactions. + // All exported methods are safe for concurrent access. + type Block struct { + // ... fields ... + } + ``` + +2. **Type Fields** + - Document each field with its purpose and constraints + - Include any thread-safety considerations + - Example: + ```go + type Block struct { + // Header contains the block's metadata and cryptographic commitments. + // This field is immutable after block creation. + Header *BlockHeader + + // Payload contains the block's transactions and execution results. + // This field is immutable after block creation. + Payload *BlockPayload + + // Signature is the cryptographic signature of the block proposer. + // This field must be set before the block is considered valid. + Signature []byte + } + ``` + +3. 
**Type Methods** + - Document each method following the method documentation rules + - Include error documentation for methods that return errors diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d66f69dece5..214de8b5c47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,7 +43,15 @@ jobs: with: # Key should change whenever implementation (tools/structwrite), or compilation config (.custom-gcl.yml) changes # When the key is different, it is a cache miss, and the custom linter binary is recompiled - key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} + # We include the SHA in the hash key because: + # - cache keys are branch/reference-scoped, with some exceptions (see https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache) + # - (we believe) cache keys for a repo share one namespace (sort of implied by https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#matching-a-cache-key) + # - (we believe) the same cache being written by two different branches may cause contention, + # as a result of the shared namespace and branch-scoped permissions + key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }}-${{ github.sha }} + # If a matching cache item from a different branch exists, and we have permission to access it, use it. + restore-keys: | + custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} path: tools/custom-gcl # path defined in .custom-gcl.yml lookup-only: 'true' # if already cached, don't download here # We install the non-custom golangci-lint binary using the golangci-lint action. @@ -83,8 +91,11 @@ jobs: id: cache-linter uses: actions/cache@v3 with: - # key should change whenever your linter code or Go version changes - key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} + # See "Cache custom linter binary" job for information about the key structure + key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }}-${{ github.sha }} + # If a matching cache item from a different branch exists, and we have permission to access it, use it. 
+ restore-keys: | + custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} path: tools/custom-gcl # We are using the cache to share data between the build-linter job and the 3 lint jobs # If there is a cache miss, it likely means either the build-linter job failed or the cache entry was evicted @@ -303,6 +314,15 @@ jobs: with: go-version: ${{ env.GO_VERSION }} cache: true + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + if: ${{ (github.event_name == 'merge_group' || (github.event.pull_request && (github.event.pull_request.author_association == 'MEMBER' || github.event.pull_request.author_association == 'COLLABORATOR'))) }} + + - name: Docker build env: CADENCE_DEPLOY_KEY: ${{ secrets.CADENCE_DEPLOY_KEY }} diff --git a/.github/workflows/image_builds.yml b/.github/workflows/image_builds.yml index fbe1c0c0d22..948472a3441 100644 --- a/.github/workflows/image_builds.yml +++ b/.github/workflows/image_builds.yml @@ -93,6 +93,12 @@ jobs: - name: Setup Google Cloud Authentication run: gcloud auth configure-docker ${{ env.PRIVATE_REGISTRY_HOST }} + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Execute ${{ matrix.build_command }} command to build and push images env: IMAGE_TAG: ${{ inputs.tag }} @@ -131,7 +137,40 @@ jobs: ref: master-private workflow_file_name: 'secure_build.yml' - promote-images: + promote-to-partner-registry: + # This job promotes container images from the private registry to the partner registry. + # As of right now, the only role being promoted to the partner registry is 'access'. + # It uses a matrix strategy to handle the promotion of images for different roles in parallel. + # The environments defined for each role are used to gate the promotion process. + # This ensures that only approved images are deployed to the partner registry. + name: Promote Images to Partner Registry + runs-on: ubuntu-latest + needs: [public-build, secure-build] + # This job will only run if the previous jobs were successful and not cancelled. + # It checks the results of both the public and secure builds to ensure that at least one of them succeeded. + if: | + ${{ !cancelled() }} && + ${{ needs.public-build.result != 'failure' || needs.secure-build.result != 'failure' }} + strategy: + fail-fast: false + matrix: + role: [access] + environment: ${{ matrix.role }} image promotion to partner registry + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Promote ${{ matrix.role }} + uses: ./actions/promote-images + with: + gcp_credentials: ${{ secrets.PARTNER_REGISTRY_PROMOTION_SECRET }} + private_registry: ${{ vars.PRIVATE_REGISTRY }} + private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} + promotion_registry: ${{ vars.PARTNER_REGISTRY }} + role: ${{ matrix.role }} + tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" + + promote-to-public-registry: # This job promotes container images for various roles from a private registry to a public registry. # It uses a matrix strategy to handle the promotion of images for different roles in parallel. # The environments defined for each role are used to gate the promotion process. 
@@ -159,7 +198,7 @@ jobs: gcp_credentials: ${{ secrets.PUBLIC_REGISTRY_PROMOTION_SECRET }} private_registry: ${{ vars.PRIVATE_REGISTRY }} private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} - public_registry: ${{ vars.PUBLIC_REGISTRY }} + promotion_registry: ${{ vars.PUBLIC_REGISTRY }} role: ${{ matrix.role }} tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" diff --git a/.gitignore b/.gitignore index 351d3d4004b..59074591874 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,12 @@ flowdb .idea .vscode *.code-workspace +# ignore all files in the .cursor directory, except for the rules directory +.cursor/* +!.cursor/rules/ +!.cursor/rules/* +# do ignore rules files matching private-* +.cursor/rules/private-* git language/tools/vscode-extension/cadence-*.vsix language/tools/vscode-extension/out/* diff --git a/.golangci.yml b/.golangci.yml index 353ae034991..4a0be7a073e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -40,6 +40,12 @@ issues: linters: - unused - structwrite + - path: 'consensus/hotstuff/helper/*' # disable some linters on test helper files + linters: + - structwrite + - path: 'utils/unittest/*' # disable some linters on test files + linters: + - structwrite # typecheck currently not handling the way we do function inheritance well # disabling for now - path: 'cmd/access/node_build/*' diff --git a/CODEOWNERS b/CODEOWNERS index 9f34b096dbc..c00cc40e9ed 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,5 +1,2 @@ # Protocol owners are not generally differentiated by sub-area for simplicity /** @onflow/flow-core-protocol - -# FVM specific -/fvm/** @janezpodhostnik @zhangchiqing diff --git a/Makefile b/Makefile index d72ab8c41f0..ca47a474c27 100644 --- a/Makefile +++ b/Makefile @@ -175,7 +175,6 @@ generate-mocks: install-mock-generators mockery --name '.*' --dir=module/component --case=underscore --output="./module/component/mock" --outpkg="component" mockery --name '.*' --dir=network --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" mockery --name '.*' --dir=storage --case=underscore --output="./storage/mock" --outpkg="mock" - mockery --name 'DeferredDBUpdate' --dir=storage/badger/transaction --case=underscore --output="storage/mock" --outpkg="mock" mockery --name '.*' --dir="state/protocol" --case=underscore --output="state/protocol/mock" --outpkg="mock" mockery --name '.*' --dir="state/protocol/events" --case=underscore --output="./state/protocol/events/mock" --outpkg="mock" mockery --name '.*' --dir="state/protocol/protocol_state" --case=underscore --output="state/protocol/protocol_state/mock" --outpkg="mock" @@ -209,7 +208,13 @@ generate-mocks: install-mock-generators mockery --name 'LinkGenerator' --dir="./engine/access/rest/common/models" --case=underscore --output="./engine/access/rest/common/models/mock" --outpkg="mock" mockery --name 'WebsocketConnection' --dir="./engine/access/rest/websockets" --case=underscore --output="./engine/access/rest/websockets/mock" --outpkg="mock" mockery --name 'ConnectionFactory' --dir="./engine/access/rpc/connection" --case=underscore --output="./engine/access/rpc/connection/mock" --outpkg="mock" - mockery --name 'Communicator' --dir="./engine/access/rpc/backend" --case=underscore --output="./engine/access/rpc/backend/mock" --outpkg="mock" + mockery --name 'Communicator' --dir="./engine/access/rpc/backend/node_communicator" --case=underscore --output="./engine/access/rpc/backend/node_communicator/mock" --outpkg="mock" + mockery --name 
'AccountProvider' --dir="./engine/access/rpc/backend/accounts/provider" --case=underscore --output="./engine/access/rpc/backend/accounts/provider/mock" --outpkg="mock" + mockery --name 'EventProvider' --dir="./engine/access/rpc/backend/events/provider" --case=underscore --output="./engine/access/rpc/backend/events/provider/mock" --outpkg="mock" + mockery --name 'TransactionProvider' --dir="./engine/access/rpc/backend/transactions/provider" --case=underscore --output="./engine/access/rpc/backend/transactions/provider/mock" --outpkg="mock" + mockery --name 'Provider' --dir="./engine/access/rpc/backend/transactions/error_messages" --case=underscore --output="./engine/access/rpc/backend/transactions/error_messages/mock" --outpkg="mock" + mockery --name 'TransactionSender' --dir="./engine/access/rpc/backend/transactions/retrier" --case=underscore --output="./engine/access/rpc/backend/transactions/retrier/mock" --outpkg="mock" + mockery --name 'Retrier' --dir="./engine/access/rpc/backend/transactions/retrier" --case=underscore --output="./engine/access/rpc/backend/transactions/retrier/mock" --outpkg="mock" mockery --name '.*' --dir=model/fingerprint --case=underscore --output="./model/fingerprint/mock" --outpkg="mock" mockery --name 'ExecForkActor' --structname 'ExecForkActorMock' --dir=module/mempool/consensus/mock/ --case=underscore --output="./module/mempool/consensus/mock/" --outpkg="mock" mockery --name '.*' --dir=engine/verification/fetcher/ --case=underscore --output="./engine/verification/fetcher/mock" --outpkg="mockfetcher" @@ -217,7 +222,9 @@ generate-mocks: install-mock-generators mockery --name 'Storage' --dir=module/executiondatasync/tracker --case=underscore --output="module/executiondatasync/tracker/mock" --outpkg="mocktracker" mockery --name 'ScriptExecutor' --dir=module/execution --case=underscore --output="module/execution/mock" --outpkg="mock" mockery --name 'StorageSnapshot' --dir=fvm/storage/snapshot --case=underscore --output="fvm/storage/snapshot/mock" --outpkg="mock" - mockery --name 'Core' --dir=module/executiondatasync/optimistic_syncing --case=underscore --output="module/executiondatasync/optimistic_syncing/mock" --outpkg="mock" + mockery --name 'Core' --dir=module/executiondatasync/optimistic_sync --case=underscore --output="module/executiondatasync/optimistic_sync/mock" --outpkg="mock" + mockery --name 'Requester' --dir=engine/access/ingestion/tx_error_messages --case=underscore --output="engine/access/ingestion/tx_error_messages/mock" --outpkg="mock" + mockery --name 'ExecutionDataRequester' --dir=module/state_synchronization/requester --case=underscore --output="module/state_synchronization/requester/mock" --outpkg="mock" #temporarily make insecure/ a non-module to allow mockery to create mocks mv insecure/go.mod insecure/go2.mod @@ -243,12 +250,12 @@ tools/custom-gcl: tools/structwrite .custom-gcl.yml .PHONY: lint lint: tidy tools/custom-gcl # revive -config revive.toml -exclude storage/ledger/trie ./... - ./tools/custom-gcl run -v ./... + ./tools/custom-gcl run -v $(or $(LINT_PATH),./...) .PHONY: fix-lint fix-lint: # revive -config revive.toml -exclude storage/ledger/trie ./... - ./tools/custom-gcl run -v --fix ./... + ./tools/custom-gcl run -v --fix $(or $(LINT_PATH),./...) # Runs unit tests with different list of packages as passed by CI so they run in parallel .PHONY: ci diff --git a/README.md b/README.md index 4ea2dfbf633..80cc8db9090 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ digital assets that power them. 
Read more about it [here](https://github.com/onf ## Documentation -You can find an overview of the Flow architecture on the [documentation website](https://www.onflow.org/primer). +You can find an overview of the Flow architecture on the [documentation website](https://www.flow.com/primer). Development on Flow is divided into work streams. Each work stream has a home directory containing high-level documentation for the stream, as well as links to documentation for relevant components used by that work stream. diff --git a/access/api.go b/access/api.go index 2753b22b352..da7cd41539f 100644 --- a/access/api.go +++ b/access/api.go @@ -10,32 +10,7 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// API provides all public-facing functionality of the Flow Access API. -type API interface { - Ping(ctx context.Context) error - GetNetworkParameters(ctx context.Context) accessmodel.NetworkParameters - GetNodeVersionInfo(ctx context.Context) (*accessmodel.NodeVersionInfo, error) - - GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) - GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) - GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) - - GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) - GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) - GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) - - GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) - GetFullCollectionByID(ctx context.Context, id flow.Identifier) (*flow.Collection, error) - - SendTransaction(ctx context.Context, tx *flow.TransactionBody) error - GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) - GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) - GetTransactionResult(ctx context.Context, id flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) - GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, requiredEventEncodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) - GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) ([]*accessmodel.TransactionResult, error) - GetSystemTransaction(ctx context.Context, blockID flow.Identifier) (*flow.TransactionBody, error) - GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) - +type AccountsAPI interface { GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) @@ -47,13 +22,102 @@ type API interface { GetAccountKeyAtBlockHeight(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) GetAccountKeysAtLatestBlock(ctx context.Context, address flow.Address) ([]flow.AccountPublicKey, error) GetAccountKeysAtBlockHeight(ctx context.Context, address flow.Address, 
height uint64) ([]flow.AccountPublicKey, error) +} + +type EventsAPI interface { + GetEventsForHeightRange( + ctx context.Context, + eventType string, + startHeight, + endHeight uint64, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) ([]flow.BlockEvents, error) + + GetEventsForBlockIDs( + ctx context.Context, + eventType string, + blockIDs []flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) ([]flow.BlockEvents, error) +} +type ScriptsAPI interface { ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) +} + +type TransactionsAPI interface { + SendTransaction(ctx context.Context, tx *flow.TransactionBody) error + + GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) + GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) + + GetTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) + GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) + GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) ([]*accessmodel.TransactionResult, error) + + GetSystemTransaction(ctx context.Context, blockID flow.Identifier) (*flow.TransactionBody, error) + GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) +} + +type TransactionStreamAPI interface { + // SubscribeTransactionStatuses subscribes to transaction status updates for a given transaction ID. Monitoring starts + // from the latest block to obtain the current transaction status. If the transaction is already in the final state + // ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]), all statuses will be prepared and sent to the client + // sequentially. If the transaction is not in the final state, the subscription will stream status updates until the transaction + // reaches the final state. Once a final state is reached, the subscription will automatically terminate. + // + // Parameters: + // - ctx: Context to manage the subscription's lifecycle, including cancellation. + // - txID: The unique identifier of the transaction to monitor. + // - requiredEventEncodingVersion: The version of event encoding required for the subscription. + SubscribeTransactionStatuses( + ctx context.Context, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) subscription.Subscription + + // SendAndSubscribeTransactionStatuses sends a transaction to the execution node and subscribes to its status updates. + // Monitoring begins from the reference block saved in the transaction itself and streams status updates until the transaction + // reaches the final state ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]). Once the final status has been reached, the subscription + // automatically terminates. 
+ // + // Parameters: + // - ctx: The context to manage the transaction sending and subscription lifecycle, including cancellation. + // - tx: The transaction body to be sent and monitored. + // - requiredEventEncodingVersion: The version of event encoding required for the subscription. + // + // If the transaction cannot be sent, the subscription will fail and return a failed subscription. + SendAndSubscribeTransactionStatuses( + ctx context.Context, + tx *flow.TransactionBody, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) subscription.Subscription +} + +// API provides all public-facing functionality of the Flow Access API. +type API interface { + AccountsAPI + EventsAPI + ScriptsAPI + TransactionsAPI + TransactionStreamAPI + + Ping(ctx context.Context) error + GetNetworkParameters(ctx context.Context) accessmodel.NetworkParameters + GetNodeVersionInfo(ctx context.Context) (*accessmodel.NodeVersionInfo, error) + + GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) + GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) + GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) - GetEventsForHeightRange(ctx context.Context, eventType string, startHeight, endHeight uint64, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) - GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) + GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) + GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) + GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) + + GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) + GetFullCollectionByID(ctx context.Context, id flow.Identifier) (*flow.Collection, error) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) @@ -62,8 +126,6 @@ type API interface { GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) - // SubscribeBlocks - // SubscribeBlocksFromStartBlockID subscribes to the finalized or sealed blocks starting at the requested // start block id, up until the latest available block. Once the latest is // reached, the stream will remain open and responses are sent for each new @@ -111,8 +173,6 @@ type API interface { // If invalid parameters will be supplied SubscribeBlocksFromLatest will return a failed subscription. SubscribeBlocksFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription - // SubscribeHeaders - // SubscribeBlockHeadersFromStartBlockID streams finalized or sealed block headers starting at the requested // start block id, up until the latest available block header. Once the latest is // reached, the stream will remain open and responses are sent for each new @@ -208,29 +268,4 @@ type API interface { // // If invalid parameters will be supplied SubscribeBlockDigestsFromLatest will return a failed subscription. 
SubscribeBlockDigestsFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription - - // SubscribeTransactionStatuses subscribes to transaction status updates for a given transaction ID. Monitoring starts - // from the latest block to obtain the current transaction status. If the transaction is already in the final state - // ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]), all statuses will be prepared and sent to the client - // sequentially. If the transaction is not in the final state, the subscription will stream status updates until the transaction - // reaches the final state. Once a final state is reached, the subscription will automatically terminate. - // - // Parameters: - // - ctx: Context to manage the subscription's lifecycle, including cancellation. - // - txID: The unique identifier of the transaction to monitor. - // - requiredEventEncodingVersion: The version of event encoding required for the subscription. - SubscribeTransactionStatuses(ctx context.Context, txID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription - - // SendAndSubscribeTransactionStatuses sends a transaction to the execution node and subscribes to its status updates. - // Monitoring begins from the reference block saved in the transaction itself and streams status updates until the transaction - // reaches the final state ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]). Once the final status has been reached, the subscription - // automatically terminates. - // - // Parameters: - // - ctx: The context to manage the transaction sending and subscription lifecycle, including cancellation. - // - tx: The transaction body to be sent and monitored. - // - requiredEventEncodingVersion: The version of event encoding required for the subscription. - // - // If the transaction cannot be sent, the subscription will fail and return a failed subscription. 
- SendAndSubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription } diff --git a/access/mock/api.go b/access/mock/api.go index 60f9ccc1ba8..107bd8ca1f8 100644 --- a/access/mock/api.go +++ b/access/mock/api.go @@ -946,9 +946,9 @@ func (_m *API) GetSystemTransaction(ctx context.Context, blockID flow.Identifier return r0, r1 } -// GetSystemTransactionResult provides a mock function with given fields: ctx, blockID, requiredEventEncodingVersion -func (_m *API) GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { - ret := _m.Called(ctx, blockID, requiredEventEncodingVersion) +// GetSystemTransactionResult provides a mock function with given fields: ctx, blockID, encodingVersion +func (_m *API) GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, encodingVersion) if len(ret) == 0 { panic("no return value specified for GetSystemTransactionResult") @@ -957,10 +957,10 @@ func (_m *API) GetSystemTransactionResult(ctx context.Context, blockID flow.Iden var r0 *modelaccess.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { - return rf(ctx, blockID, requiredEventEncodingVersion) + return rf(ctx, blockID, encodingVersion) } if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { - r0 = rf(ctx, blockID, requiredEventEncodingVersion) + r0 = rf(ctx, blockID, encodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*modelaccess.TransactionResult) @@ -968,7 +968,7 @@ func (_m *API) GetSystemTransactionResult(ctx context.Context, blockID flow.Iden } if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) error); ok { - r1 = rf(ctx, blockID, requiredEventEncodingVersion) + r1 = rf(ctx, blockID, encodingVersion) } else { r1 = ret.Error(1) } @@ -1006,9 +1006,9 @@ func (_m *API) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.Tr return r0, r1 } -// GetTransactionResult provides a mock function with given fields: ctx, id, blockID, collectionID, requiredEventEncodingVersion -func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { - ret := _m.Called(ctx, id, blockID, collectionID, requiredEventEncodingVersion) +// GetTransactionResult provides a mock function with given fields: ctx, txID, blockID, collectionID, encodingVersion +func (_m *API) GetTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, txID, blockID, collectionID, encodingVersion) if len(ret) == 0 { panic("no return value specified for GetTransactionResult") @@ -1017,10 +1017,10 @@ func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blo var r0 *modelaccess.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, 
flow.Identifier, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { - return rf(ctx, id, blockID, collectionID, requiredEventEncodingVersion) + return rf(ctx, txID, blockID, collectionID, encodingVersion) } if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { - r0 = rf(ctx, id, blockID, collectionID, requiredEventEncodingVersion) + r0 = rf(ctx, txID, blockID, collectionID, encodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*modelaccess.TransactionResult) @@ -1028,7 +1028,7 @@ func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blo } if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) error); ok { - r1 = rf(ctx, id, blockID, collectionID, requiredEventEncodingVersion) + r1 = rf(ctx, txID, blockID, collectionID, encodingVersion) } else { r1 = ret.Error(1) } @@ -1036,9 +1036,9 @@ func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blo return r0, r1 } -// GetTransactionResultByIndex provides a mock function with given fields: ctx, blockID, index, requiredEventEncodingVersion -func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, requiredEventEncodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { - ret := _m.Called(ctx, blockID, index, requiredEventEncodingVersion) +// GetTransactionResultByIndex provides a mock function with given fields: ctx, blockID, index, encodingVersion +func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, index, encodingVersion) if len(ret) == 0 { panic("no return value specified for GetTransactionResultByIndex") @@ -1047,10 +1047,10 @@ func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Ide var r0 *modelaccess.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { - return rf(ctx, blockID, index, requiredEventEncodingVersion) + return rf(ctx, blockID, index, encodingVersion) } if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { - r0 = rf(ctx, blockID, index, requiredEventEncodingVersion) + r0 = rf(ctx, blockID, index, encodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*modelaccess.TransactionResult) @@ -1058,7 +1058,7 @@ func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Ide } if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) error); ok { - r1 = rf(ctx, blockID, index, requiredEventEncodingVersion) + r1 = rf(ctx, blockID, index, encodingVersion) } else { r1 = ret.Error(1) } @@ -1066,9 +1066,9 @@ func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Ide return r0, r1 } -// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID, requiredEventEncodingVersion -func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, 
error) { - ret := _m.Called(ctx, blockID, requiredEventEncodingVersion) +// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID, encodingVersion +func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, encodingVersion) if len(ret) == 0 { panic("no return value specified for GetTransactionResultsByBlockID") @@ -1077,10 +1077,10 @@ func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow. var r0 []*modelaccess.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error)); ok { - return rf(ctx, blockID, requiredEventEncodingVersion) + return rf(ctx, blockID, encodingVersion) } if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) []*modelaccess.TransactionResult); ok { - r0 = rf(ctx, blockID, requiredEventEncodingVersion) + r0 = rf(ctx, blockID, encodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*modelaccess.TransactionResult) @@ -1088,7 +1088,7 @@ func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow. } if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) error); ok { - r1 = rf(ctx, blockID, requiredEventEncodingVersion) + r1 = rf(ctx, blockID, encodingVersion) } else { r1 = ret.Error(1) } diff --git a/access/validator/validator.go b/access/validator/validator.go index d3ad5304baf..f81d5bfb525 100644 --- a/access/validator/validator.go +++ b/access/validator/validator.go @@ -234,7 +234,6 @@ func (v *TransactionValidator) initValidationSteps() { } func (v *TransactionValidator) Validate(ctx context.Context, tx *flow.TransactionBody) (err error) { - for _, step := range v.validationSteps { if err = step.check(tx); err != nil { v.transactionValidationMetrics.TransactionValidationFailed(step.failReason) diff --git a/actions/promote-images/action.yml b/actions/promote-images/action.yml index ace4fb9131d..e230f3e8b6d 100644 --- a/actions/promote-images/action.yml +++ b/actions/promote-images/action.yml @@ -1,5 +1,5 @@ -name: Promote Image to Public Registry -description: Pull image from private registry and push to public registry +name: Promote Image to another Registry +description: Pull image from private registry and push to another registry inputs: gcp_credentials: @@ -11,8 +11,8 @@ inputs: private_registry_host: description: 'Private Google Artifact Registry hostname' required: true - public_registry: - description: 'Public container registry URL' + promotion_registry: + description: 'Registry to promote images to' required: true role: description: 'Role to promote' @@ -43,29 +43,29 @@ runs: # Convert comma-separated tags input into an array IFS=',' read -ra TAGS <<< "${{ inputs.tags }}" - # Loop through each tag and pull the image from the private registry, then tag it for the public registry + # Loop through each tag and pull the image from the private registry, then tag it for the registry to promote to for TAG in "${TAGS[@]}"; do IMAGE_PRIVATE="${{ inputs.private_registry }}/${{ inputs.role }}:${TAG}" - IMAGE_PUBLIC="${{ inputs.public_registry }}/${{ inputs.role }}:${TAG}" - echo "Processing ${IMAGE_PRIVATE} -> ${IMAGE_PUBLIC}" + IMAGE_PROMOTION="${{ inputs.promotion_registry }}/${{ inputs.role }}:${TAG}" + echo "Processing ${IMAGE_PRIVATE} -> 
${IMAGE_PROMOTION}" docker pull "${IMAGE_PRIVATE}" - docker tag "${IMAGE_PRIVATE}" "${IMAGE_PUBLIC}" + docker tag "${IMAGE_PRIVATE}" "${IMAGE_PROMOTION}" done - - name: Authenticate with Public Registry + - name: Authenticate with registry to promote to run: | gcloud auth configure-docker shell: bash - - name: Push Images to Public Registry + - name: Push Images to registry to promote to shell: bash run: | # Convert comma-separated tags input into an array IFS=',' read -ra TAGS <<< "${{ inputs.tags }}" - # Loop through each tag and push the image to the public registry + # Loop through each tag and push the image to the promotion registry for TAG in "${TAGS[@]}"; do - IMAGE_PUBLIC="${{ inputs.public_registry }}/${{ inputs.role }}:${TAG}" - echo "Pushing Image ${IMAGE_PUBLIC} to Public registry" - docker push "${IMAGE_PUBLIC}" + IMAGE_PROMOTION="${{ inputs.promotion_registry }}/${{ inputs.role }}:${TAG}" + echo "Pushing Image ${IMAGE_PROMOTION} to Public registry" + docker push "${IMAGE_PROMOTION}" done diff --git a/admin/commands/storage/backfill_tx_error_messages.go b/admin/commands/storage/backfill_tx_error_messages.go index ab6e980b009..81559eaf776 100644 --- a/admin/commands/storage/backfill_tx_error_messages.go +++ b/admin/commands/storage/backfill_tx_error_messages.go @@ -154,7 +154,7 @@ func (b *BackfillTxErrorMessagesCommand) Handler(ctx context.Context, request *a } blockID := header.ID() - err = b.txErrorMessagesCore.HandleTransactionResultErrorMessagesByENs(ctx, blockID, data.executionNodeIds) + err = b.txErrorMessagesCore.FetchErrorMessagesByENs(ctx, blockID, data.executionNodeIds) if err != nil { return nil, fmt.Errorf("error encountered while processing transaction result error message for block: %d, %w", height, err) } diff --git a/admin/commands/storage/backfill_tx_error_messages_test.go b/admin/commands/storage/backfill_tx_error_messages_test.go index 2f80106c0de..a9e7deaedf1 100644 --- a/admin/commands/storage/backfill_tx_error_messages_test.go +++ b/admin/commands/storage/backfill_tx_error_messages_test.go @@ -10,17 +10,20 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/engine/access/index" "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/invalid" protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -47,6 +50,10 @@ type BackfillTxErrorMessagesSuite struct { receipts *storagemock.ExecutionReceipts headers *storagemock.Headers + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex + execClient *accessmock.ExecutionAPIClient connFactory *connectionmock.ConnectionFactory @@ -75,6 +82,13 @@ func (suite *BackfillTxErrorMessagesSuite) SetupTest() { 
suite.receipts = new(storagemock.ExecutionReceipts) suite.transactionResults = storagemock.NewLightTransactionResults(suite.T()) suite.txErrorMessages = new(storagemock.TransactionResultErrorMessages) + suite.reporter = syncmock.NewIndexReporter(suite.T()) + + suite.indexReporter = index.NewReporter() + err := suite.indexReporter.Initialize(suite.reporter) + suite.Require().NoError(err) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.transactionResults) + suite.execClient = new(accessmock.ExecutionAPIClient) suite.blockCount = 5 @@ -132,25 +146,18 @@ func (suite *BackfillTxErrorMessagesSuite) SetupTest() { nil, ) - var err error - suite.backend, err = backend.New(backend.Params{ - State: suite.state, - ExecutionReceipts: suite.receipts, - ConnFactory: suite.connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, - Log: suite.log, - SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly, - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, - ChainID: flow.Testnet, - ExecNodeIdentitiesProvider: executionNodeIdentitiesProvider, - }) - require.NoError(suite.T(), err) + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + suite.log, + suite.txErrorMessages, + suite.txResultsIndex, + suite.connFactory, + node_communicator.NewNodeCommunicator(false), + executionNodeIdentitiesProvider, + ) suite.txResultErrorMessagesCore = tx_error_messages.NewTxErrorMessagesCore( suite.log, - suite.backend, + errorMessageProvider, suite.txErrorMessages, executionNodeIdentitiesProvider, ) diff --git a/admin/commands/storage/pebble_checkpoint.go b/admin/commands/storage/pebble_checkpoint.go index 358e23474f7..64a2274610b 100644 --- a/admin/commands/storage/pebble_checkpoint.go +++ b/admin/commands/storage/pebble_checkpoint.go @@ -6,7 +6,7 @@ import ( "path" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog/log" "github.com/onflow/flow-go/admin" diff --git a/admin/commands/storage/read_range_cluster_blocks.go b/admin/commands/storage/read_range_cluster_blocks.go index f28e3bcd7e7..b0e41b86fe8 100644 --- a/admin/commands/storage/read_range_cluster_blocks.go +++ b/admin/commands/storage/read_range_cluster_blocks.go @@ -4,14 +4,14 @@ import ( "context" "fmt" - "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog/log" "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/admin/commands" "github.com/onflow/flow-go/cmd/util/cmd/read-light-block" "github.com/onflow/flow-go/model/flow" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var _ commands.AdminCommand = (*ReadRangeClusterBlocksCommand)(nil) @@ -21,12 +21,12 @@ var _ commands.AdminCommand = (*ReadRangeClusterBlocksCommand)(nil) const Max_Range_Cluster_Block_Limit = uint64(10001) type ReadRangeClusterBlocksCommand struct { - db *badger.DB - headers *storage.Headers - payloads *storage.ClusterPayloads + db storage.DB + headers *store.Headers + payloads *store.ClusterPayloads } -func NewReadRangeClusterBlocksCommand(db *badger.DB, headers *storage.Headers, payloads *storage.ClusterPayloads) commands.AdminCommand { +func NewReadRangeClusterBlocksCommand(db storage.DB, headers *store.Headers, payloads *store.ClusterPayloads) commands.AdminCommand { return &ReadRangeClusterBlocksCommand{ db: db, headers: headers, @@ -51,7 +51,7 @@ func (c 
*ReadRangeClusterBlocksCommand) Handler(ctx context.Context, req *admin. return nil, admin.NewInvalidAdminReqErrorf("getting for more than %v blocks at a time might have an impact to node's performance and is not allowed", Max_Range_Cluster_Block_Limit) } - clusterBlocks := storage.NewClusterBlocks( + clusterBlocks := store.NewClusterBlocks( c.db, flow.ChainID(chainID), c.headers, c.payloads, ) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index e22c0239965..9796ad58fdc 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -13,8 +13,6 @@ import ( "github.com/ipfs/boxo/bitswap" "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - badgerds "github.com/ipfs/go-ds-badger2" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" "github.com/onflow/crypto" @@ -51,6 +49,10 @@ import ( "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" rpcConnection "github.com/onflow/flow-go/engine/access/rpc/connection" "github.com/onflow/flow-go/engine/access/state_stream" statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" @@ -114,6 +116,7 @@ import ( "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" + statedatastore "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" pstorage "github.com/onflow/flow-go/storage/pebble" @@ -209,7 +212,7 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { CollectionClientTimeout: 3 * time.Second, ExecutionClientTimeout: 3 * time.Second, ConnectionPoolSize: backend.DefaultConnectionPoolSize, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, PreferredExecutionNodeIDs: nil, FixedExecutionNodeIDs: nil, CircuitBreakerConfig: rpcConnection.CircuitBreakerConfig{ @@ -218,9 +221,9 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { MaxFailures: 5, MaxRequests: 1, }, - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now - EventQueryMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now }, RestConfig: rest.Config{ ListenAddress: "", @@ -371,8 +374,9 @@ type FlowAccessNodeBuilder struct { stateStreamBackend *statestreambackend.StateStreamBackend nodeBackend *backend.Backend - ExecNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider - TxResultErrorMessagesCore *tx_error_messages.TxErrorMessagesCore + ExecNodeIdentitiesProvider 
*commonrpc.ExecutionNodeIdentitiesProvider + TxResultErrorMessagesCore *tx_error_messages.TxErrorMessagesCore + txResultErrorMessageProvider error_messages.Provider } func (builder *FlowAccessNodeBuilder) buildFollowerState() *FlowAccessNodeBuilder { @@ -442,7 +446,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerCore() *FlowAccessNodeBuilder builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) packer := signature.NewConsensusSigDataPacker(builder.Committee) // initialize the verifier for the protocol consensus @@ -558,7 +562,6 @@ func (builder *FlowAccessNodeBuilder) BuildConsensusFollower() *FlowAccessNodeBu } func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccessNodeBuilder { - var ds datastore.Batching var bs network.BlobService var processedBlockHeight storage.ConsumerProgressInitializer var processedNotifications storage.ConsumerProgressInitializer @@ -566,7 +569,6 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess var execDataDistributor *edrequester.ExecutionDataDistributor var execDataCacheBackend *herocache.BlockExecutionData var executionDataStoreCache *execdatacache.ExecutionDataCache - var executionDataDBMode execution_data.ExecutionDataDBMode // setup dependency chain to ensure indexer starts after the requester requesterDependable := module.NewProxiedReadyDoneAware() @@ -590,38 +592,14 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess return nil }). Module("execution data datastore and blobstore", func(node *cmd.NodeConfig) error { - datastoreDir := filepath.Join(builder.executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) - if err != nil { - return err - } - - executionDataDBMode, err = execution_data.ParseExecutionDataDBMode(builder.executionDataDBMode) + var err error + builder.ExecutionDatastoreManager, err = edstorage.CreateDatastoreManager( + node.Logger, builder.executionDataDir, builder.executionDataDBMode) if err != nil { - return fmt.Errorf("could not parse execution data DB mode: %w", err) - } - - if executionDataDBMode == execution_data.ExecutionDataDBModePebble { - builder.ExecutionDatastoreManager, err = edstorage.NewPebbleDatastoreManager( - node.Logger.With().Str("pebbledb", "endata").Logger(), - datastoreDir, nil) - if err != nil { - return fmt.Errorf("could not create PebbleDatastoreManager for execution data: %w", err) - } - } else { - builder.ExecutionDatastoreManager, err = edstorage.NewBadgerDatastoreManager(datastoreDir, &badgerds.DefaultOptions) - if err != nil { - return fmt.Errorf("could not create BadgerDatastoreManager for execution data: %w", err) - } + return fmt.Errorf("could not create execution data datastore manager: %w", err) } - ds = builder.ExecutionDatastoreManager.Datastore() - builder.ShutdownFunc(func() error { - if err := builder.ExecutionDatastoreManager.Close(); err != nil { - return fmt.Errorf("could not close execution data datastore: %w", err) - } - return nil - }) + builder.ShutdownFunc(builder.ExecutionDatastoreManager.Close) return nil }). 
@@ -646,7 +624,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess return nil }). Module("execution datastore", func(node *cmd.NodeConfig) error { - builder.ExecutionDataBlobstore = blobs.NewBlobstore(ds) + builder.ExecutionDataBlobstore = blobs.NewBlobstore(builder.ExecutionDatastoreManager.Datastore()) builder.ExecutionDataStore = execution_data.NewExecutionDataStore(builder.ExecutionDataBlobstore, execution_data.DefaultSerializer) return nil }). @@ -689,7 +667,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess } var err error - bs, err = node.EngineRegistry.RegisterBlobService(channels.ExecutionDataService, ds, opts...) + bs, err = node.EngineRegistry.RegisterBlobService(channels.ExecutionDataService, builder.ExecutionDatastoreManager.Datastore(), opts...) if err != nil { return nil, fmt.Errorf("could not register blob service: %w", err) } @@ -850,7 +828,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess net := builder.AccessNodeConfig.PublicNetworkConfig.Network var err error - builder.PublicBlobService, err = net.RegisterBlobService(channels.PublicExecutionDataService, ds, opts...) + builder.PublicBlobService, err = net.RegisterBlobService(channels.PublicExecutionDataService, builder.ExecutionDatastoreManager.Datastore(), opts...) if err != nil { return nil, fmt.Errorf("could not register blob service: %w", err) } @@ -978,6 +956,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess builder.RootChainID.Chain(), indexerDerivedChainData, notNil(builder.collectionExecutedMetric), + node.StorageLockMgr, ) if err != nil { return nil, err @@ -1064,7 +1043,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess } broadcaster := engine.NewBroadcaster() - eventQueryMode, err := backend.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode) + eventQueryMode, err := query_mode.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode) if err != nil { return nil, fmt.Errorf("could not parse event query mode: %w", err) } @@ -1072,7 +1051,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess // use the events index for events if enabled and the node is configured to use it for // regular event queries useIndex := builder.executionDataIndexingEnabled && - eventQueryMode != backend.IndexQueryModeExecutionNodesOnly + eventQueryMode != query_mode.IndexQueryModeExecutionNodesOnly executionDataTracker := subscriptiontracker.NewExecutionDataTracker( builder.Logger, @@ -1617,7 +1596,7 @@ func (builder *FlowAccessNodeBuilder) InitIDProviders() { builder.SyncEngineParticipantsProviderFactory = func() module.IdentifierProvider { return id.NewIdentityFilterIdentifierProvider( filter.And( - filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.HasRole[flow.Identity](flow.RoleExecution), filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), filter.NotEjectedFilter, ), @@ -1656,7 +1635,7 @@ func (builder *FlowAccessNodeBuilder) Initialize() error { builder.EnqueueTracer() builder.PreInit(cmd.DynamicStartPreInit) - builder.ValidateRootSnapshot(badgerState.ValidRootSnapshotContainsEntityExpiryRange) + builder.ValidateRootSnapshot(statedatastore.ValidRootSnapshotContainsEntityExpiryRange) return nil } @@ -1991,16 +1970,16 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { ), } - scriptExecMode, err := backend.ParseIndexQueryMode(config.BackendConfig.ScriptExecutionMode) + 
scriptExecMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.ScriptExecutionMode) if err != nil { return nil, fmt.Errorf("could not parse script execution mode: %w", err) } - eventQueryMode, err := backend.ParseIndexQueryMode(config.BackendConfig.EventQueryMode) + eventQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.EventQueryMode) if err != nil { return nil, fmt.Errorf("could not parse event query mode: %w", err) } - if eventQueryMode == backend.IndexQueryModeCompare { + if eventQueryMode == query_mode.IndexQueryModeCompare { return nil, fmt.Errorf("event query mode 'compare' is not supported") } @@ -2016,11 +1995,11 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { if err != nil { return nil, fmt.Errorf("failed to initialize block tracker: %w", err) } - txResultQueryMode, err := backend.ParseIndexQueryMode(config.BackendConfig.TxResultQueryMode) + txResultQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.TxResultQueryMode) if err != nil { return nil, fmt.Errorf("could not parse transaction result query mode: %w", err) } - if txResultQueryMode == backend.IndexQueryModeCompare { + if txResultQueryMode == query_mode.IndexQueryModeCompare { return nil, fmt.Errorf("transaction result query mode 'compare' is not supported") } @@ -2054,6 +2033,16 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { fixedENIdentifiers, ) + nodeCommunicator := node_communicator.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled) + builder.txResultErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + node.Logger, + builder.transactionResultErrorMessages, // might be nil + notNil(builder.TxResultsIndex), + connFactory, + nodeCommunicator, + notNil(builder.ExecNodeIdentitiesProvider), + ) + builder.nodeBackend, err = backend.New(backend.Params{ State: node.State, CollectionRPC: builder.CollectionRPC, // might be nil @@ -2072,7 +2061,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { MaxHeightRange: backendConfig.MaxHeightRange, Log: node.Logger, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled), + Communicator: nodeCommunicator, TxResultCacheSize: builder.TxResultCacheSize, ScriptExecutor: notNil(builder.ScriptExecutor), ScriptExecutionMode: scriptExecMode, @@ -2093,6 +2082,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { IndexReporter: indexReporter, VersionControl: notNil(builder.VersionControl), ExecNodeIdentitiesProvider: notNil(builder.ExecNodeIdentitiesProvider), + TxErrorMessageProvider: notNil(builder.txResultErrorMessageProvider), }) if err != nil { return nil, fmt.Errorf("could not initialize backend: %w", err) @@ -2133,7 +2123,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { var err error builder.RequestEng, err = requester.New( - node.Logger, + node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, node.EngineRegistry, node.Me, @@ -2149,34 +2139,42 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { if builder.storeTxResultErrorMessages { builder.TxResultErrorMessagesCore = tx_error_messages.NewTxErrorMessagesCore( node.Logger, - notNil(builder.nodeBackend), + notNil(builder.txResultErrorMessageProvider), builder.transactionResultErrorMessages, notNil(builder.ExecNodeIdentitiesProvider), ) } + collectionSyncer := ingestion.NewCollectionSyncer( + node.Logger, + 
notNil(builder.collectionExecutedMetric), + builder.RequestEng, + node.State, + node.Storage.Blocks, + notNil(builder.collections), + notNil(builder.transactions), + lastFullBlockHeight, + node.StorageLockMgr, + ) + builder.RequestEng.WithHandle(collectionSyncer.OnCollectionDownloaded) + builder.IngestEng, err = ingestion.New( node.Logger, node.EngineRegistry, node.State, node.Me, - builder.RequestEng, node.Storage.Blocks, - node.Storage.Headers, - notNil(builder.collections), - notNil(builder.transactions), node.Storage.Results, node.Storage.Receipts, - notNil(builder.collectionExecutedMetric), processedFinalizedBlockHeight, - lastFullBlockHeight, + notNil(collectionSyncer), + notNil(builder.collectionExecutedMetric), notNil(builder.TxResultErrorMessagesCore), ) if err != nil { return nil, err } ingestionDependable.Init(builder.IngestEng) - builder.RequestEng.WithHandle(builder.IngestEng.OnCollection) builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.IngestEng.OnFinalizedBlock) return builder.IngestEng, nil diff --git a/cmd/bootstrap/README.md b/cmd/bootstrap/README.md index 1d4eef5f580..28caa0d34ca 100644 --- a/cmd/bootstrap/README.md +++ b/cmd/bootstrap/README.md @@ -3,9 +3,9 @@ This package contains script for generating the bootstrap files needed to initialize the Flow network. The high-level bootstrapping process is described in [Notion](https://www.notion.so/dapperlabs/Flow-Bootstrapping-ce9d227f18a8410dbce74ed7d4ddee27). -WARNING: These scripts use Go's crypto/rand package to generate seeds for private keys. Make sure you are running the bootstrap scripts on a machine that does provide proper a low-level implementation. See https://golang.org/pkg/crypto/rand/ for details. +WARNING: These scripts use Go's crypto/rand package to generate seeds for private keys, whenever seeds are not provided to the commands. Make sure you are running the bootstrap scripts on a machine that does provide a low-level cryptographically secure RNG. See https://golang.org/pkg/crypto/rand/ for details. -NOTE: Public and private keys are encoded in JSON files as base64 strings, not as hex, contrary to what might be expected. +NOTE: Public and private keys are encoded in JSON files as hex strings. Code structure: * `cmd/bootstrap/cmd` contains CLI logic that can exit the program and read/write files. It also uses structures and data types that are purely relevant for CLI purposes, such as encoding, decoding, etc. 
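To make the hex-encoding convention noted above concrete, here is a minimal, self-contained Go sketch. It is an illustration only: the struct below is a simplified stand-in for the real `model/bootstrap` types, the JSON field names mirror the example node-info files, and the key bytes are placeholders rather than real keys.

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// nodeInfoPub is a simplified stand-in for the public node-info JSON written
// during bootstrapping; the authoritative schema lives in model/bootstrap.
type nodeInfoPub struct {
	NetworkPubKey string `json:"NetworkPubKey"`
	StakingPubKey string `json:"StakingPubKey"`
}

func main() {
	// Placeholder key bytes; in practice these are the serialized public keys
	// produced by the bootstrap key generation commands.
	networkKey := []byte{0x01, 0x62, 0xf9, 0x51}
	stakingKey := []byte{0x84, 0xf8, 0x06, 0xbe}

	info := nodeInfoPub{
		// Keys are written as hex strings (not base64), per the NOTE above.
		NetworkPubKey: hex.EncodeToString(networkKey),
		StakingPubKey: hex.EncodeToString(stakingKey),
	}

	out, err := json.MarshalIndent(info, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Decoding goes the other way: read the hex string from the JSON file and pass it through `hex.DecodeString` before handing the bytes to the key deserialization routines.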
@@ -18,9 +18,9 @@ Code structure: The bootstrapping will generate the following information: #### Per node -* Staking key (BLS key with curve BLS12-381) -* Networking key (ECDSA key) -* Random beacon key; _only_ for consensus nodes (BLS based on Joint-Feldman DKG for threshold signatures) +* Staking private key (BLS key on curve BLS12-381) +* Networking private key (ECDSA key on curve P-256) +* Random beacon private key; _only_ for consensus nodes (BLS key on curve BLS12-381, used for BLS-based threshold signatures) #### Node Identities * List of all authorized Flow nodes @@ -28,6 +28,7 @@ The bootstrapping will generate the following information: - node ID - node role - public staking key + - proof of possession of the staking private key - public networking key - weight @@ -61,7 +62,8 @@ Values directly specified as command line parameters: Values can be specified as command line parameters: - seed for generating staking key (min 48 bytes in hex encoding) - seed for generating networking key (min 48 bytes in hex encoding) -If seeds are not provided, the CLI will try to use the system's pseudo-random number generator (PRNG), e. g. `dev/urandom`. Make sure you are running the CLI on a hardware that has a cryptographically secure PRNG, or provide seeds generated on such a system. +Provided seeds must be of high entropy, ideally generated by a cryptographically secure RNG. +If seeds are not provided, the CLI will try to use the system's random number generator (RNG), e.g. `/dev/urandom`. Make sure you are running the CLI on hardware that has a cryptographically secure RNG. #### Example ```bash @@ -76,7 +78,7 @@ go run ./cmd/bootstrap key --address "example.com:1234" --role "consensus" -o ./ file needs to be available to respective partner node at boot up (or recovery after crash) * file `.node-info.pub.json` - public information - - file needs to be delivered to Dapper Labs for Phase 2 of generating root information, + - file needs to be delivered to the Flow Foundation team for Phase 2 of generating root information, but is not required at node start diff --git a/cmd/bootstrap/cmd/block.go b/cmd/bootstrap/cmd/block.go index b582eb21b3b..1ed56b87884 100644 --- a/cmd/bootstrap/cmd/block.go +++ b/cmd/bootstrap/cmd/block.go @@ -2,6 +2,7 @@ package cmd import ( "encoding/hex" + "fmt" "time" "github.com/onflow/flow-go/cmd/bootstrap/run" @@ -44,20 +45,24 @@ func constructRootEpochEvents( clusterQCs []*flow.QuorumCertificate, dkgData dkg.ThresholdKeySet, dkgIndexMap flow.DKGIndexMap, -) (*flow.EpochSetup, *flow.EpochCommit) { - - epochSetup := &flow.EpochSetup{ - Counter: flagEpochCounter, - FirstView: firstView, - FinalView: firstView + flagNumViewsInEpoch - 1, - DKGPhase1FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase - 1, - DKGPhase2FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*2 - 1, - DKGPhase3FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 - 1, - Participants: participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton(), - Assignments: assignments, - RandomSource: GenerateRandomSeed(flow.EpochSetupRandomSourceLength), - TargetDuration: flagEpochTimingDuration, - TargetEndTime: rootEpochTargetEndTime(), +) (*flow.EpochSetup, *flow.EpochCommit, error) { + epochSetup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: flagEpochCounter, + FirstView: firstView, + DKGPhase1FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase - 1, + DKGPhase2FinalView: firstView +
flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*2 - 1, + DKGPhase3FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 - 1, + FinalView: firstView + flagNumViewsInEpoch - 1, + Participants: participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton(), + Assignments: assignments, + RandomSource: GenerateRandomSeed(flow.EpochSetupRandomSourceLength), + TargetDuration: flagEpochTimingDuration, + TargetEndTime: rootEpochTargetEndTime(), + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("could not construct epoch setup: %w", err) } qcsWithSignerIDs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clusterQCs)) @@ -75,14 +80,20 @@ func constructRootEpochEvents( }) } - epochCommit := &flow.EpochCommit{ - Counter: flagEpochCounter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), - DKGGroupKey: dkgData.PubGroupKey, - DKGParticipantKeys: dkgData.PubKeyShares, - DKGIndexMap: dkgIndexMap, + epochCommit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: flagEpochCounter, + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), + DKGGroupKey: dkgData.PubGroupKey, + DKGParticipantKeys: dkgData.PubKeyShares, + DKGIndexMap: dkgIndexMap, + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("could not construct epoch commit: %w", err) } - return epochSetup, epochCommit + + return epochSetup, epochCommit, nil } func parseChainID(chainID string) flow.ChainID { diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go deleted file mode 100644 index cc41c741881..00000000000 --- a/cmd/bootstrap/cmd/final_list.go +++ /dev/null @@ -1,325 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/cmd/util/cmd/common" - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" -) - -var ( - flagStakingNodesPath string -) - -// finalListCmd represents the final list command -var finalListCmd = &cobra.Command{ - Use: "finallist", - Short: "generates a final list of nodes to be used for next network", - Long: "generates a final list of nodes to be used for next network after validating node data and matching against staking contract nodes ", - Run: finalList, -} - -func init() { - rootCmd.AddCommand(finalListCmd) - addFinalListFlags() -} - -func addFinalListFlags() { - // partner node info flag - finalListCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-infos", "", "path to a directory containing all parnter nodes details") - cmd.MarkFlagRequired(finalListCmd, "partner-infos") - - // internal/flow node info flag - finalListCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "flow-infos", "", "path to a directory containing all internal/flow nodes details") - cmd.MarkFlagRequired(finalListCmd, "flow-infos") - - // staking nodes dir containing staking nodes json - finalListCmd.Flags().StringVar(&flagStakingNodesPath, "staking-nodes", "", "path to a JSON file of all staking nodes") - cmd.MarkFlagRequired(finalListCmd, "staking-nodes") - - finalListCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, - "number of collection clusters") -} - -func finalList(cmd *cobra.Command, args []string) { - // read public partner node infos - log.Info().Msgf("reading partner public node information: %s", flagPartnerNodeInfoDir) - partnerNodes := assemblePartnerNodesWithoutWeight() - - // read internal private node infos - log.Info().Msgf("reading internal/flow private node information: %s", 
flagInternalNodePrivInfoDir) - internalNodes := assembleInternalNodesWithoutWeight() - - log.Info().Msg("checking constraints on consensus/cluster nodes") - checkConstraints(partnerNodes, internalNodes) - - // nodes which are registered on-chain - log.Info().Msgf("reading staking contract node information: %s", flagStakingNodesPath) - registeredNodes := readStakingContractDetails() - - // merge internal and partner node infos (from local files) - localNodes, err := mergeNodeInfos(internalNodes, partnerNodes) - if err != nil { - log.Fatal().Err(err).Msg("failed to merge node infos") - } - - // reconcile nodes from staking contract nodes - validateNodes(localNodes, registeredNodes) - - // write node-config.json with the new list of nodes to be used for the `finalize` command - err = common.WriteJSON(model.PathFinallist, flagOutdir, model.ToPublicNodeInfoList(localNodes)) - if err != nil { - log.Fatal().Err(err).Msg("failed to write json") - } - log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathFinallist) -} - -func validateNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - // check node count - if len(localNodes) != len(registeredNodes) { - log.Error(). - Int("local", len(localNodes)). - Int("onchain", len(registeredNodes)). - Msg("onchain node count does not match local internal+partner node count") - } - - // check registered and local nodes to make sure node ID are not missing - validateNodeIDs(localNodes, registeredNodes) - - // print mismatching nodes - checkMismatchingNodes(localNodes, registeredNodes) - - // create map - localNodeMap := make(map[flow.Identifier]model.NodeInfo) - for _, node := range localNodes { - localNodeMap[node.NodeID] = node - } - - // check node type mismatch - for _, registeredNode := range registeredNodes { - - // win have matching node as we have a check before - matchingNode := localNodeMap[registeredNode.NodeID] - - // check node type and error if mismatch - if matchingNode.Role != registeredNode.Role { - log.Error(). - Str("registered node id", registeredNode.NodeID.String()). - Str("registered node role", registeredNode.Role.String()). - Str("local node", matchingNode.NodeID.String()). - Str("local node role", matchingNode.Role.String()). - Msg("node role does not match") - } - - if matchingNode.Address != registeredNode.Address { - log.Error(). - Str("registered node id", registeredNode.NodeID.String()). - Str("registered node address", registeredNode.Address). - Str("local node", matchingNode.NodeID.String()). - Str("local node address", matchingNode.Address). - Msg("node address does not match") - } - - // check address match - if matchingNode.Address != registeredNode.Address { - log.Warn(). - Str("registered node", registeredNode.NodeID.String()). - Str("node id", matchingNode.NodeID.String()). - Msg("address do not match") - } - - // flow localNodes contain private key info - if matchingNode.NetworkPubKey().String() != "" { - // check networking pubkey match - matchNodeKey := matchingNode.NetworkPubKey().String() - registeredNodeKey := registeredNode.NetworkPubKey().String() - - if matchNodeKey != registeredNodeKey { - log.Error(). - Str("registered network key", registeredNodeKey). - Str("network key", matchNodeKey). 
- Msg("networking keys do not match") - } - } - - // flow localNodes contain privatekey info - if matchingNode.StakingPubKey().String() != "" { - matchNodeKey := matchingNode.StakingPubKey().String() - registeredNodeKey := registeredNode.StakingPubKey().String() - - if matchNodeKey != registeredNodeKey { - log.Error(). - Str("registered staking key", registeredNodeKey). - Str("staking key", matchNodeKey). - Msg("staking keys do not match") - } - } - } -} - -// validateNodeIDs will go through both sets of nodes and ensure that no node-id -// are missing. It will log all missing node ID's and throw an error. -func validateNodeIDs(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - - // go through registered nodes - invalidStakingNodes := make([]model.NodeInfo, 0) - for _, node := range registeredNodes { - if node.NodeID.String() == "" { - - // we warn here but exit later - invalidStakingNodes = append(invalidStakingNodes, node) - log.Warn(). - Str("node-address", node.Address). - Msg("missing node-id from registered nodes") - } - } - - // go through local nodes - invalidNodes := make([]model.NodeInfo, 0) - for _, node := range localNodes { - if node.NodeID.String() == "" { - - // we warn here but exit later - invalidNodes = append(invalidNodes, node) - log.Warn(). - Str("node-address", node.Address). - Msg("missing node-id from local nodes") - } - } - - if len(invalidNodes) != 0 || len(invalidStakingNodes) != 0 { - log.Fatal().Msg("found missing nodes ids. fix and re-run") - } -} - -func checkMismatchingNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - - localNodesByID := make(map[flow.Identifier]model.NodeInfo) - for _, node := range localNodes { - localNodesByID[node.NodeID] = node - } - - registeredNodesByID := make(map[flow.Identifier]model.NodeInfo) - for _, node := range registeredNodes { - registeredNodesByID[node.NodeID] = node - } - - // try match local nodes to registered nodes - invalidLocalNodes := make([]model.NodeInfo, 0) - for _, node := range localNodes { - if _, ok := registeredNodesByID[node.NodeID]; !ok { - log.Warn(). - Str("local-node-id", node.NodeID.String()). - Str("role", node.Role.String()). - Str("address", node.Address). - Msg("matching registered node not found for local node") - invalidLocalNodes = append(invalidLocalNodes, node) - } - } - - invalidRegisteredNodes := make([]model.NodeInfo, 0) - for _, node := range registeredNodes { - if _, ok := localNodesByID[node.NodeID]; !ok { - log.Warn(). - Str("registered-node-id", node.NodeID.String()). - Str("role", node.Role.String()). - Str("address", node.Address). 
- Msg("matching local node not found for local node") - invalidRegisteredNodes = append(invalidRegisteredNodes, node) - } - } - - if len(invalidLocalNodes) != 0 || len(invalidRegisteredNodes) != 0 { - log.Fatal().Msg("found missing mismatching nodes") - } -} - -func assembleInternalNodesWithoutWeight() []model.NodeInfo { - privInternals, err := common.ReadInternalNodeInfos(flagInternalNodePrivInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("failed to read internal node infos") - } - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) - - var nodes []model.NodeInfo - for _, internal := range privInternals { - // check if address is valid format - common.ValidateAddressFormat(log, internal.Address) - - // validate every single internal node - err := common.ValidateNodeID(internal.NodeID) - if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", internal.NodeID)) - } - - node := model.NewPrivateNodeInfo( - internal.NodeID, - internal.Role, - internal.Address, - flow.DefaultInitialWeight, - internal.NetworkPrivKey, - internal.StakingPrivKey, - ) - - nodes = append(nodes, node) - } - - return nodes -} - -func assemblePartnerNodesWithoutWeight() []model.NodeInfo { - partners, err := common.ReadPartnerNodeInfos(flagPartnerNodeInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("failed to read partner node infos") - } - log.Info().Msgf("read %v partner node configuration files", len(partners)) - return createPublicNodeInfo(partners) -} - -func readStakingContractDetails() []model.NodeInfo { - var stakingNodes []model.NodeInfoPub - err := common.ReadJSON(flagStakingNodesPath, &stakingNodes) - if err != nil { - log.Fatal().Err(err).Msg("failed to read json") - } - return createPublicNodeInfo(stakingNodes) -} - -func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { - var publicInfoNodes []model.NodeInfo - for _, n := range nodes { - common.ValidateAddressFormat(log, n.Address) - - // validate every single partner node - err := common.ValidateNodeID(n.NodeID) - if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", n.NodeID)) - } - err = common.ValidateNetworkPubKey(n.NetworkPubKey) - if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("invalid network public key: %s", n.NetworkPubKey)) - } - err = common.ValidateStakingPubKey(n.StakingPubKey) - if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("invalid staking public key: %s", n.StakingPubKey)) - } - - // all nodes should have equal weight (this might change in the future) - node := model.NewPublicNodeInfo( - n.NodeID, - n.Role, - n.Address, - flow.DefaultInitialWeight, - n.NetworkPubKey, - n.StakingPubKey, - ) - - publicInfoNodes = append(publicInfoNodes, node) - } - - return publicInfoNodes -} diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index ed7863180fd..49fc352dc28 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -18,11 +18,12 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" - "github.com/onflow/flow-go/state/protocol/badger" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/inmem" 
"github.com/onflow/flow-go/state/protocol/protocol_state" "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" @@ -50,7 +51,7 @@ var ( var finalizeCmd = &cobra.Command{ Use: "finalize", Short: "Finalize the bootstrapping process", - Long: `Finalize the bootstrapping process, which includes running the DKG for the generation of the random beacon + Long: `Finalize the bootstrapping process, which includes generating the random beacon keys and generating the root block, QC, execution result and block seal.`, Run: finalize, } @@ -68,12 +69,12 @@ func addFinalizeCmdFlags() { "containing the output from the `keygen` command for internal nodes") finalizeCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ - " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") + " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey, StakingKeyPoP)") // Deprecated: remove this flag finalizeCmd.Flags().StringVar(&deprecatedFlagPartnerStakes, "partner-stakes", "", "deprecated: use partner-weights instead") finalizeCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ "a map from partner node's NodeID to their weight") - finalizeCmd.Flags().StringVar(&flagDKGDataPath, "dkg-data", "", "path to a JSON file containing data as output from DKG process") + finalizeCmd.Flags().StringVar(&flagDKGDataPath, "dkg-data", "", "path to a JSON file containing data as output from the random beacon key generation") finalizeCmd.Flags().StringVar(&flagRootCommit, "root-commit", "0000000000000000000000000000000000000000000000000000000000000000", "state commitment of root execution state") cmd.MarkFlagRequired(finalizeCmd, "config") @@ -139,7 +140,7 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("") // create flow.IdentityList representation of participant set - participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) + participants := bootstrap.Sort(stakingNodes, flow.Canonical[flow.Identity]) log.Info().Msg("reading root block data") block := readRootBlock() @@ -151,7 +152,7 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msgf("received votes total: %v", len(votes)) - log.Info().Msg("reading dkg data") + log.Info().Msg("reading random beacon keys") dkgData, _ := readRandomBeaconKeys() log.Info().Msg("") @@ -202,13 +203,13 @@ func finalize(cmd *cobra.Command, args []string) { // validate the generated root snapshot is valid verifyResultID := true - err = badger.IsValidRootSnapshot(snapshot, verifyResultID) + err = datastore.IsValidRootSnapshot(snapshot, verifyResultID) if err != nil { log.Fatal().Err(err).Msg("the generated root snapshot is invalid") } // validate the generated root snapshot QCs - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err != nil { log.Fatal().Err(err).Msg("root snapshot contains invalid QCs") } @@ -246,13 +247,13 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("saved result and seal are matching") - err = badger.IsValidRootSnapshot(rootSnapshot, verifyResultID) + err = datastore.IsValidRootSnapshot(rootSnapshot, verifyResultID) if err != nil { log.Fatal().Err(err).Msg("saved snapshot is invalid") } // validate the generated root snapshot QCs - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err 
!= nil { log.Fatal().Err(err).Msg("root snapshot contains invalid QCs") } @@ -408,7 +409,7 @@ func readIntermediaryBootstrappingData() *IntermediaryBootstrappingData { func generateEmptyExecutionState( rootBlock *flow.Header, epochConfig epochs.EpochConfig, - identities flow.IdentityList, + nodes []bootstrap.NodeInfo, ) (commit flow.StateCommitment) { log.Info().Msg("generating empty execution state") @@ -433,7 +434,7 @@ func generateEmptyExecutionState( fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), fvm.WithEpochConfig(epochConfig), - fvm.WithIdentities(identities), + fvm.WithNodes(nodes), ) if err != nil { log.Fatal().Err(err).Msg("unable to generate execution state") diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 8bd082b3199..e14a32c73f0 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -31,7 +31,7 @@ const finalizeHappyPathLogs = "collecting partner network and staking keys" + `reading root block data` + `reading root block votes` + `read vote .*` + - `reading dkg data` + + `reading random beacon keys` + `reading intermediary bootstrapping data` + `constructing root QC` + `constructing root execution result and block seal` + diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index 7ef97a19a8e..689d7d280f0 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -112,7 +112,11 @@ func keyCmdRun(_ *cobra.Command, _ []string) { } log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathSecretsEncryptionKey) - err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), flagOutdir, nodeInfo.Public()) + public, err := nodeInfo.Public() + if err != nil { + log.Fatal().Err(err).Msg("could not access public keys") + } + err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), flagOutdir, public) if err != nil { log.Fatal().Err(err).Msg("failed to write json") } diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index c76a4ca4e9a..ddb6435a9d5 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -89,7 +89,10 @@ var keygenCmd = &cobra.Command{ } log.Info().Msg("generating node public information") - genNodePubInfo(nodes) + err = writeNodePubInfo(nodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to generate nodes public info") + } }, } @@ -120,14 +123,19 @@ func isEmptyDir(path string) (bool, error) { return false, err // Either not empty or error, suits both cases } -func genNodePubInfo(nodes []model.NodeInfo) { +func writeNodePubInfo(nodes []model.NodeInfo) error { pubNodes := make([]model.NodeInfoPub, 0, len(nodes)) for _, node := range nodes { - pubNodes = append(pubNodes, node.Public()) + pub, err := node.Public() + if err != nil { + return fmt.Errorf("failed to read public info: %w", err) + } + pubNodes = append(pubNodes, pub) } err := common.WriteJSON(model.PathInternalNodeInfosPub, flagOutdir, pubNodes) if err != nil { - log.Fatal().Err(err).Msg("failed to write json") + return err } log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathInternalNodeInfosPub) + return nil } diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index f33b5f28241..e6e75391567 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -56,7 +56,8 @@ func genNetworkAndStakingKeys() []model.NodeInfo { return model.Sort(internalNodes, flow.Canonical[flow.Identity]) } -func assembleNodeInfo(nodeConfig 
model.NodeConfig, networkKey, stakingKey crypto.PrivateKey) model.NodeInfo { +func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto.PrivateKey, +) model.NodeInfo { var err error nodeID, found := getNameID() if !found { @@ -71,7 +72,7 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto Str("stakingPubKey", stakingKey.PublicKey().String()). Msg("encoded public staking and network keys") - nodeInfo := model.NewPrivateNodeInfo( + nodeInfo, err := model.NewPrivateNodeInfo( nodeID, nodeConfig.Role, nodeConfig.Address, @@ -79,6 +80,9 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto networkKey, stakingKey, ) + if err != nil { + log.Fatal().Err(err).Msg("creating node info failed") + } return nodeInfo } diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index f6f463941b3..751dcbaea01 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -28,6 +28,7 @@ const ( networkingAddressField = "networkingAddress" networkingKeyField = "networkingKey" stakingKeyField = "stakingKey" + // PoP field isn't included because it is not stored on-chain ) const ( @@ -173,6 +174,8 @@ func parseNodeInfo(info cadence.Value) (*bootstrap.NodeInfoPub, error) { return nil, fmt.Errorf("failed to decode staking public key: %w", err) } + // PoP field isn't decoded because it is not stored on-chain + return &bootstrap.NodeInfoPub{ Role: flow.Role(fields[roleField].(cadence.UInt8)), Address: string(fields[networkingAddressField].(cadence.String)), diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index af930c7369d..ad60eab414a 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -187,7 +187,11 @@ func rootBlock(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msgf("failed to merge node infos") } - err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, model.ToPublicNodeInfoList(stakingNodes)) + publicInfo, err := model.ToPublicNodeInfoList(stakingNodes) + if err != nil { + log.Fatal().Msg("failed to read public node info") + } + err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo) if err != nil { log.Fatal().Err(err).Msg("failed to write json") } @@ -221,7 +225,10 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("") log.Info().Msg("constructing intermediary bootstrapping data") - epochSetup, epochCommit := constructRootEpochEvents(header.View, participants, assignments, clusterQCs, randomBeaconData, dkgIndexMap) + epochSetup, epochCommit, err := constructRootEpochEvents(header.View, participants, assignments, clusterQCs, randomBeaconData, dkgIndexMap) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct root epoch events") + } epochConfig := generateExecutionStateEpochConfig(epochSetup, clusterQCs, randomBeaconData) intermediaryEpochData := IntermediaryEpochData{ RootEpochSetup: epochSetup, @@ -244,10 +251,15 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("") log.Info().Msg("constructing root block") + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct epoch protocol state") + } + rootProtocolState, err := kvstore.NewDefaultKVStore( flagFinalizationSafetyThreshold, flagEpochExtensionViewCount, - inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit).ID(), + minEpochStateEntry.ID(), ) if err 
!= nil { log.Fatal().Err(err).Msg("failed to construct root kvstore") diff --git a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json deleted file mode 100644 index 1c54ec0d6aa..00000000000 --- a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "Role": "consensus", - "Address": "example.com", - "NodeID": "047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595", - "Weight": 0, - "NetworkPubKey": "6fSXKlHhPLMqWo3nhBsyjTZlxGBl5HC4ZqC7p1z4GEHx48UKnDaKdz0QdOxzSJP2G+P3bzDiJdLbvEd1CSvVgA==", - "StakingPubKey": "gdQQp6cbOzc/pnhOMl8mNQTAsbkuGs78Q72/zmhrAK+Ii2c/v04F9CDEo+FuVc0eALL/T0ioZwaTFCBO9+JRjfakqOiBCI9b7Xj4E8Dv4vBHDQyLOBBqXeA2VLAJYgFL" -} diff --git a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json new file mode 100644 index 00000000000..60f83924e4d --- /dev/null +++ b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json @@ -0,0 +1,9 @@ +{ + "Role": "consensus", + "Address": "example.com:1234", + "NodeID": "79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6", + "Weight": 0, + "NetworkPubKey": "0162f95166d39db8a6486625813019fcc8bb3d9439ad1e57b44f7bf01235cbcabd66411c3faa98de806e439cb4372275b76dcd3af7d384d24851cbae89f92cda", + "StakingPubKey": "84f806be7e4db914358e5b66a405244161ad5bfd87939b3a9b428a941baa6ae245d0d7a6cef684bd7168815fda5e9b6506b2cc87ec9c52576913d1990fd7c376fc2c6884247ff6a7c0c46ca143e3697422913d53c134b9534a199b7fc8f57d50", + "StakingPoP": "oEz2R3qe86/ZaRAemZfpdjcBZcOt7RHLjMhqjf7gg99XMsaLjmDma94Rr9ylciti" +} \ No newline at end of file diff --git a/cmd/bootstrap/run/cluster_qc_test.go b/cmd/bootstrap/run/cluster_qc_test.go index 69b181d6bbe..ee22b45f5b9 100644 --- a/cmd/bootstrap/run/cluster_qc_test.go +++ b/cmd/bootstrap/run/cluster_qc_test.go @@ -45,7 +45,8 @@ func createClusterParticipants(t *testing.T, n int) []model.NodeInfo { participants := make([]model.NodeInfo, n) for i, id := range ids { - participants[i] = model.NewPrivateNodeInfo( + var err error + participants[i], err = model.NewPrivateNodeInfo( id.NodeID, id.Role, id.Address, @@ -53,6 +54,7 @@ func createClusterParticipants(t *testing.T, n int) []model.NodeInfo { networkKeys[i], stakingKeys[i], ) + require.NoError(t, err) } return participants diff --git a/cmd/bootstrap/run/qc_test.go b/cmd/bootstrap/run/qc_test.go index 79b93a7151f..9c5c1a231bc 100644 --- a/cmd/bootstrap/run/qc_test.go +++ b/cmd/bootstrap/run/qc_test.go @@ -69,7 +69,7 @@ func createSignerData(t *testing.T, n int) *ParticipantData { participantLookup[identity.NodeID] = lookupParticipant // add to participant list - nodeInfo := bootstrap.NewPrivateNodeInfo( + nodeInfo, err := bootstrap.NewPrivateNodeInfo( identity.NodeID, identity.Role, identity.Address, @@ -77,6 +77,7 @@ func createSignerData(t *testing.T, n int) *ParticipantData { networkingKeys[i], 
stakingKeys[i], ) + require.NoError(t, err) participants[i] = Participant{ NodeInfo: nodeInfo, RandomBeaconPrivKey: randomBSKs[i], diff --git a/cmd/bootstrap/run/seal.go b/cmd/bootstrap/run/seal.go index de9e5cb75bf..009d01cff1f 100644 --- a/cmd/bootstrap/run/seal.go +++ b/cmd/bootstrap/run/seal.go @@ -6,15 +6,25 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// GenerateRootSeal generates a root seal matching the input root result. +// The input is assumed to be a valid root result. +// No errors are expected during normal operation. func GenerateRootSeal(result *flow.ExecutionResult) (*flow.Seal, error) { finalState, err := result.FinalStateCommitment() if err != nil { return nil, fmt.Errorf("generating root seal failed: %w", err) } - seal := &flow.Seal{ - BlockID: result.BlockID, - ResultID: result.ID(), - FinalState: finalState, + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: result.BlockID, + ResultID: result.ID(), + FinalState: finalState, + AggregatedApprovalSigs: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct seal: %w", err) } + return seal, nil } diff --git a/cmd/bootstrap/transit/cmd/crypto_test.go b/cmd/bootstrap/transit/cmd/crypto_test.go index 4e8bfddfc73..a5932c68a40 100644 --- a/cmd/bootstrap/transit/cmd/crypto_test.go +++ b/cmd/bootstrap/transit/cmd/crypto_test.go @@ -14,14 +14,12 @@ import ( const nodeID string = "0000000000000000000000000000000000000000000000000000000000000001" func TestEndToEnd(t *testing.T) { - // Create a temp directory to work as "bootstrap" bootdir := t.TempDir() t.Logf("Created dir %s", bootdir) // Create test files - //bootcmd.WriteText(filepath.Join(bootdir, bootstrap.PathNodeId), []byte(nodeID) randomBeaconPath := filepath.Join(bootdir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) err := os.MkdirAll(filepath.Dir(randomBeaconPath), 0755) if err != nil { @@ -46,9 +44,13 @@ func TestEndToEnd(t *testing.T) { t.Fatalf("Error wrapping files: %s", err) } + unWrappedFilePath := filepath.Join( + bootdir, + fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID), + ) // Client: // Unwrap files - err = unWrapFile(bootdir, nodeID) + err = unWrapFile(bootdir, nodeID, bootdir, unWrappedFilePath) if err != nil { t.Fatalf("Error unwrapping response: %s", err) } diff --git a/cmd/bootstrap/transit/cmd/flags.go b/cmd/bootstrap/transit/cmd/flags.go index 6c1c2f19978..89eea52b283 100644 --- a/cmd/bootstrap/transit/cmd/flags.go +++ b/cmd/bootstrap/transit/cmd/flags.go @@ -11,6 +11,9 @@ var ( flagTimeout time.Duration flagConcurrency int64 - flagWrapID string // wrap ID - flagVoteFile string + flagWrapID string // wrap ID + flagVoteFile string + flagVoteFilePath string + flagNodeID string + flagOutputDir string ) diff --git a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go index 562edc67372..370a7706684 100644 --- a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go +++ b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go @@ -26,6 +26,11 @@ var generateVoteCmd = &cobra.Command{ func init() { rootCmd.AddCommand(generateVoteCmd) + addGenerateVoteCmdFlags() +} + +func addGenerateVoteCmdFlags() { + generateVoteCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "ouput directory for vote files; if not set defaults to bootstrap directory") } func generateVote(c *cobra.Command, args []string) { @@ -47,8 +52,13 @@ func generateVote(c *cobra.Command, args []string) { } // load DKG private key - path := fmt.Sprintf(bootstrap.PathRandomBeaconPriv, 
nodeID) - data, err := io.ReadFile(filepath.Join(flagBootDir, path)) + path := filepath.Join(flagBootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + // If output directory is specified, use it for the root-block.json + if flagOutputDir != "" { + path = filepath.Join(flagOutputDir, bootstrap.FilenameRandomBeaconPriv) + } + + data, err := io.ReadFile(path) if err != nil { log.Fatal().Err(err).Msg("could not read DKG private key file") } @@ -78,6 +88,12 @@ func generateVote(c *cobra.Command, args []string) { signer := verification.NewCombinedSigner(me, beaconKeyStore) path = filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + + // If output directory is specified, use it for the root-block.json + if flagOutputDir != "" { + path = filepath.Join(flagOutputDir, "root-block.json") + } + data, err = io.ReadFile(path) if err != nil { log.Fatal().Err(err).Msg("could not read root block file") @@ -96,7 +112,15 @@ func generateVote(c *cobra.Command, args []string) { voteFile := fmt.Sprintf(bootstrap.PathNodeRootBlockVote, nodeID) - if err = io.WriteJSON(filepath.Join(flagBootDir, voteFile), vote); err != nil { + // By default, use the bootstrap directory for storing the vote file + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // If output directory is specified, use it for the vote file path + if flagOutputDir != "" { + voteFilePath = filepath.Join(flagOutputDir, "root-block-vote.json") + } + + if err = io.WriteJSON(voteFilePath, vote); err != nil { log.Fatal().Err(err).Msg("could not write vote to file") } diff --git a/cmd/bootstrap/transit/cmd/prepare.go b/cmd/bootstrap/transit/cmd/prepare.go index c11e2e05314..f8d4944b4c2 100644 --- a/cmd/bootstrap/transit/cmd/prepare.go +++ b/cmd/bootstrap/transit/cmd/prepare.go @@ -21,6 +21,8 @@ func init() { func addPrepareCmdFlags() { prepareCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) + prepareCmd.Flags().StringVarP(&flagNodeID, "nodeID", "n", "", "node id") + prepareCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "ouput directory") _ = prepareCmd.MarkFlagRequired("role") } @@ -38,12 +40,22 @@ func prepare(cmd *cobra.Command, args []string) { return } - nodeID, err := readNodeID() - if err != nil { - log.Fatal().Err(err).Msg("could not read node ID from file") + // Set the output directory from the flag or use the bootstrap directory + outputDir := flagOutputDir + if outputDir == "" { + outputDir = flagBootDir + } + + // Set the NodeID from the flag or read it from the file + nodeID := flagNodeID + if nodeID == "" { + nodeID, err = readNodeID() + if err != nil { + log.Fatal().Err(err).Msg("could not read node ID from file") + } } - err = generateKeys(flagBootDir, nodeID) + err = generateKeys(outputDir, nodeID) if err != nil { log.Fatal().Err(err).Msg("failed to prepare") } diff --git a/cmd/bootstrap/transit/cmd/pull.go b/cmd/bootstrap/transit/cmd/pull.go index cd5360fef83..9eb0a9861b5 100644 --- a/cmd/bootstrap/transit/cmd/pull.go +++ b/cmd/bootstrap/transit/cmd/pull.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/cmd/bootstrap/gcs" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -37,6 +38,7 @@ func addPullCmdFlags() { pullCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) 
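The transit command changes above (`generate-root-block-vote`, `prepare`, and the pull commands that follow) all apply the same rule for the new `--outputDir`/`-o` flag: when the flag is set, bootstrapping artifacts are read and written under that directory using flat file names such as `root-block.json`; otherwise the conventional layout under the bootstrap directory is used. A minimal sketch of that fallback, assuming a hypothetical helper name (`resolveArtifactPath`) that does not exist in this diff and that `path/filepath` is imported as in the surrounding files:

```go
// resolveArtifactPath is a hypothetical helper illustrating the --outputDir
// fallback that the transit commands above inline at each call site.
// bootDir is the --boot-dir value, outputDir the optional --outputDir value.
func resolveArtifactPath(bootDir, outputDir, defaultRelPath, flatName string) string {
	if outputDir != "" {
		// --outputDir set: use it directly with a fixed, flat file name.
		return filepath.Join(outputDir, flatName)
	}
	// Default: conventional path layout under the bootstrap directory.
	return filepath.Join(bootDir, defaultRelPath)
}
```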
pullCmd.Flags().DurationVar(&flagTimeout, "timeout", time.Second*300, `timeout for pull`) pullCmd.Flags().Int64Var(&flagConcurrency, "concurrency", 2, `concurrency limit for pull`) + pullCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pulling bootstrap files`) _ = pullCmd.MarkFlagRequired("token") _ = pullCmd.MarkFlagRequired("role") @@ -155,7 +157,8 @@ func pull(cmd *cobra.Command, args []string) { // unwrap consensus node role files if role == flow.RoleConsensus { - err = unWrapFile(flagBootDir, nodeID) + unWrappedFilePath := filepath.Join(flagBootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + err = unWrapFile(flagBootDir, nodeID, flagBootDir, unWrappedFilePath) if err != nil { log.Fatal().Err(err).Msg("failed to pull") } diff --git a/cmd/bootstrap/transit/cmd/pull_root_block.go b/cmd/bootstrap/transit/cmd/pull_root_block.go index bc6539bc8ad..3cb645f5124 100644 --- a/cmd/bootstrap/transit/cmd/pull_root_block.go +++ b/cmd/bootstrap/transit/cmd/pull_root_block.go @@ -25,6 +25,8 @@ func init() { func addPullRootBlockCmdFlags() { pullRootBlockCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") + pullRootBlockCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pulling root block`) + pullRootBlockCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "output directory for root block file; if not set defaults to bootstrap directory") _ = pullRootBlockCmd.MarkFlagRequired("token") } @@ -50,28 +52,45 @@ func pullRootBlock(c *cobra.Command, args []string) { log.Info().Msg("downloading root block") rootBlockFile := filepath.Join(flagToken, bootstrap.PathRootBlockData) - fullOutpath := filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + fullRootBlockPath := filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + if flagOutputDir != "" { + fullRootBlockPath = filepath.Join(flagOutputDir, "root-block.json") + } - log.Info().Str("source", rootBlockFile).Str("dest", fullOutpath).Msgf("downloading root block file from transit servers") - err = bucket.DownloadFile(ctx, client, fullOutpath, rootBlockFile) + log.Info().Str("source", rootBlockFile).Str("dest", fullRootBlockPath).Msgf("downloading root block file from transit servers") + err = bucket.DownloadFile(ctx, client, fullRootBlockPath, rootBlockFile) if err != nil { log.Fatal().Err(err).Msgf("could not download google bucket file") } - objectName := filepath.Join(flagToken, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - fullOutpath = filepath.Join(flagBootDir, filepath.Base(objectName)) + log.Info().Msg("successfully downloaded root block ") - log.Info().Msgf("downloading random beacon key: %s", objectName) + objectName := filepath.Join(flagToken, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - err = bucket.DownloadFile(ctx, client, fullOutpath, objectName) - if err != nil { - log.Fatal().Err(err).Msg("could not download file from google bucket") + // By default, use the bootstrap directory for the random beacon download & unwrapping + fullRandomBeaconPath := filepath.Join(flagBootDir, filepath.Base(objectName)) + unWrappedRandomBeaconPath := filepath.Join( + flagBootDir, + fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID), + ) + + // If output directory is specified, use it for the random beacon path + // this will set the path used to download the random beacon file and unwrap it + if flagOutputDir != "" { + fullRandomBeaconPath = 
filepath.Join(flagOutputDir, filepath.Base(objectName)) + unWrappedRandomBeaconPath = filepath.Join(flagOutputDir, bootstrap.FilenameRandomBeaconPriv) } - err = unWrapFile(flagBootDir, nodeID) + log.Info().Msgf("downloading random beacon key: %s", objectName) + + err = bucket.DownloadFile(ctx, client, fullRandomBeaconPath, objectName) if err != nil { - log.Fatal().Err(err).Msg("could not unwrap random beacon file") + log.Fatal().Err(err).Msg("could not download random beacon key file from google bucket") + } else { + err = unWrapFile(flagBootDir, nodeID, flagOutputDir, unWrappedRandomBeaconPath) + if err != nil { + log.Fatal().Err(err).Msg("could not unwrap random beacon file") + } + log.Info().Msg("successfully downloaded and unwrapped random beacon private key") } - - log.Info().Msg("successfully downloaded root block and random beacon key") } diff --git a/cmd/bootstrap/transit/cmd/push.go b/cmd/bootstrap/transit/cmd/push.go index e45b63d27f8..2d42b675489 100644 --- a/cmd/bootstrap/transit/cmd/push.go +++ b/cmd/bootstrap/transit/cmd/push.go @@ -29,6 +29,7 @@ func init() { func addPushCmdFlags() { pushCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pushCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) + pushCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for uploading transit keys`) _ = pushCmd.MarkFlagRequired("token") } diff --git a/cmd/bootstrap/transit/cmd/push_root_block_vote.go b/cmd/bootstrap/transit/cmd/push_root_block_vote.go index d225b7cf8a5..51702ecd476 100644 --- a/cmd/bootstrap/transit/cmd/push_root_block_vote.go +++ b/cmd/bootstrap/transit/cmd/push_root_block_vote.go @@ -28,8 +28,11 @@ func addPushVoteCmdFlags() { defaultVoteFilePath := fmt.Sprintf(bootstrap.PathNodeRootBlockVote, "") pushVoteCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pushVoteCmd.Flags().StringVarP(&flagVoteFile, "vote-file", "v", "", fmt.Sprintf("path under bootstrap directory of the vote file to upload (default: %s)", defaultVoteFilePath)) + pushVoteCmd.Flags().StringVarP(&flagVoteFilePath, "vote-file-dir", "d", "", "directory for vote file to upload, ONLY for vote files outside the bootstrap directory") + pushVoteCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pushing root block vote files`) _ = pushVoteCmd.MarkFlagRequired("token") + pushVoteCmd.MarkFlagsMutuallyExclusive("vote-file", "vote-file-dir") } func pushVote(c *cobra.Command, args []string) { @@ -44,12 +47,22 @@ func pushVote(c *cobra.Command, args []string) { } voteFile := flagVoteFile + + // If --vote-file-dir is not specified, use the bootstrap directory + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // if --vote-file is not specified, use default file name within bootstrap directory if voteFile == "" { voteFile = fmt.Sprintf(bootstrap.PathNodeRootBlockVote, nodeID) + voteFilePath = filepath.Join(flagBootDir, voteFile) + } + + // If vote-file-dir is specified, use it to construct the full path to the vote file (with default file name) + if flagVoteFilePath != "" { + voteFilePath = filepath.Join(flagVoteFilePath, "root-block-vote.json") } destination := filepath.Join(flagToken, fmt.Sprintf(bootstrap.FilenameRootBlockVote, nodeID)) - source := filepath.Join(flagBootDir, voteFile) log.Info().Msg("pushing 
root block vote") @@ -66,7 +79,7 @@ func pushVote(c *cobra.Command, args []string) { } defer client.Close() - err = bucket.UploadFile(ctx, client, destination, source) + err = bucket.UploadFile(ctx, client, destination, voteFilePath) if err != nil { log.Fatal().Err(err).Msg("failed to upload vote file") } diff --git a/cmd/bootstrap/transit/cmd/upload_transit_keys.go b/cmd/bootstrap/transit/cmd/upload_transit_keys.go index d46c44b9657..7333d537d41 100644 --- a/cmd/bootstrap/transit/cmd/upload_transit_keys.go +++ b/cmd/bootstrap/transit/cmd/upload_transit_keys.go @@ -29,6 +29,7 @@ func init() { func addUploadTransitKeysCmdFlags() { pushTransitKeyCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team") + pushTransitKeyCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket used for pushing transit keys`) err := pushTransitKeyCmd.MarkFlagRequired("token") if err != nil { log.Fatal().Err(err).Msg("failed to initialize") @@ -37,7 +38,6 @@ func addUploadTransitKeysCmdFlags() { // pushTransitKey uploads transit keys to the Flow server func pushTransitKey(_ *cobra.Command, _ []string) { - nodeIDString, err := readNodeID() if err != nil { log.Fatal().Err(err).Msg("could not read node ID") diff --git a/cmd/bootstrap/transit/cmd/utils.go b/cmd/bootstrap/transit/cmd/utils.go index 1f8f2f7920b..27c75b4dc22 100644 --- a/cmd/bootstrap/transit/cmd/utils.go +++ b/cmd/bootstrap/transit/cmd/utils.go @@ -75,7 +75,6 @@ func getFileSHA256(file string) (string, error) { // moveFile moves a file from source to destination where src and dst are full paths including the filename func moveFile(src, dst string) error { - // check if source file exist if !ioutils.FileExists(src) { return fmt.Errorf("file not found: %s", src) @@ -153,14 +152,12 @@ func moveFile(src, dst string) error { return nil } -func unWrapFile(bootDir string, nodeID string) error { - +func unWrapFile(bootDir, nodeID, cipherTextDir, plaintextPath string) error { log.Info().Msg("decrypting Random Beacon key") pubKeyPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPub, nodeID)) privKeyPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPriv, nodeID)) - ciphertextPath := filepath.Join(bootDir, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - plaintextPath := filepath.Join(bootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + ciphertextPath := filepath.Join(cipherTextDir, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) ciphertext, err := ioutils.ReadFile(ciphertextPath) if err != nil { @@ -231,7 +228,6 @@ func wrapFile(bootDir string, nodeID string) error { // generateKeys creates the transit keypair and writes them to disk for later func generateKeys(bootDir string, nodeID string) error { - privPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPriv, nodeID)) pubPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPub, nodeID)) diff --git a/cmd/bootstrap/utils/key_generation.go b/cmd/bootstrap/utils/key_generation.go index 6cb662be21c..7d82109309d 100644 --- a/cmd/bootstrap/utils/key_generation.go +++ b/cmd/bootstrap/utils/key_generation.go @@ -128,7 +128,15 @@ func GenerateStakingKey(seed []byte) (crypto.PrivateKey, error) { } func GenerateStakingKeys(n int, seeds [][]byte) ([]crypto.PrivateKey, error) { - return GenerateKeys(crypto.BLSBLS12381, n, seeds) + keys := make([]crypto.PrivateKey, 0, n) + for i := 0; i < n; i++ { + key, err := GenerateStakingKey(seeds[i]) + if err != nil { + return nil, err + } + keys = append(keys, key) + 
} + return keys, nil } func GenerateKeys(algo crypto.SigningAlgorithm, n int, seeds [][]byte) ([]crypto.PrivateKey, error) { diff --git a/cmd/bootstrap/utils/node_info.go b/cmd/bootstrap/utils/node_info.go index 2dbafa7d1fa..d8bc11b649e 100644 --- a/cmd/bootstrap/utils/node_info.go +++ b/cmd/bootstrap/utils/node_info.go @@ -16,11 +16,15 @@ import ( // also writes a map containing each of the nodes weights mapped by NodeID func WritePartnerFiles(nodeInfos []model.NodeInfo, bootDir string) (string, string, error) { - // convert to public nodeInfos and map stkes + // convert to public nodeInfos and create a map from nodeID to weight nodePubInfos := make([]model.NodeInfoPub, len(nodeInfos)) weights := make(map[flow.Identifier]uint64) for i, node := range nodeInfos { - nodePubInfos[i] = node.Public() + var err error + nodePubInfos[i], err = node.Public() + if err != nil { + return "", "", fmt.Errorf("could not read public info: %w", err) + } weights[node.NodeID] = node.Weight } @@ -105,35 +109,35 @@ func GenerateNodeInfos(consensus, collection, execution, verification, access in nodes := make([]model.NodeInfo, 0) - // CONSENSUS = 1 + // CONSENSUS consensusNodes := unittest.NodeInfosFixture(consensus, unittest.WithRole(flow.RoleConsensus), unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, consensusNodes...) - // COLLECTION = 1 + // COLLECTION collectionNodes := unittest.NodeInfosFixture(collection, unittest.WithRole(flow.RoleCollection), unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, collectionNodes...) - // EXECUTION = 1 + // EXECUTION executionNodes := unittest.NodeInfosFixture(execution, unittest.WithRole(flow.RoleExecution), unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, executionNodes...) - // VERIFICATION = 1 + // VERIFICATION verificationNodes := unittest.NodeInfosFixture(verification, unittest.WithRole(flow.RoleVerification), unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, verificationNodes...) 
- // ACCESS = 1 + // ACCESS accessNodes := unittest.NodeInfosFixture(access, unittest.WithRole(flow.RoleAccess), unittest.WithInitialWeight(flow.DefaultInitialWeight), diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 8686e91a351..a529e18f766 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -56,7 +56,7 @@ import ( badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/state/protocol/events/gadgets" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/grpcutils" ) @@ -151,7 +151,7 @@ func main() { // Collection Nodes use a lower min timeout than Consensus Nodes (1.5s vs 2.5s) because: // - they tend to have higher happy-path view rate, allowing a shorter timeout // - since they have smaller committees, 1-2 offline replicas has a larger negative impact, which is mitigating with a smaller timeout - flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1500*time.Millisecond, + flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1000*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout") flags.Float64Var(&hotstuffTimeoutAdjustmentFactor, "hotstuff-timeout-adjustment-factor", timeout.DefaultConfig.TimeoutAdjustmentFactor, "adjustment of timeout duration in case of time out event") @@ -218,12 +218,9 @@ func main() { return collectionCommands.NewTxRateLimitCommand(addressRateLimiter) }). AdminCommand("read-range-cluster-blocks", func(conf *cmd.NodeConfig) commands.AdminCommand { - clusterPayloads := badger.NewClusterPayloads(&metrics.NoopCollector{}, conf.DB) - headers, ok := conf.Storage.Headers.(*badger.Headers) - if !ok { - panic("fail to initialize admin tool, conf.Storage.Headers can not be casted as badger headers") - } - return storageCommands.NewReadRangeClusterBlocksCommand(conf.DB, headers, clusterPayloads) + clusterPayloads := store.NewClusterPayloads(&metrics.NoopCollector{}, conf.ProtocolDB) + headers := store.NewHeaders(&metrics.NoopCollector{}, conf.ProtocolDB) + return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, headers, clusterPayloads) }). 
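The admin-command rewiring above illustrates the storage migration pattern that recurs throughout this diff: storages previously built by the `badger` package from the raw Badger handle are now built by the `store` package against the database-agnostic `conf.ProtocolDB`. A hedged sketch using only constructors that appear in this change set:

```go
// Construct store-backed storages from the abstract protocol database handle.
// A NoopCollector stands in for the real cache metrics to keep the sketch small.
collector := &metrics.NoopCollector{}
headers := store.NewHeaders(collector, conf.ProtocolDB)
clusterPayloads := store.NewClusterPayloads(collector, conf.ProtocolDB)
```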
Module("follower distributor", func(node *cmd.NodeConfig) error { followerDistributor = pubsub.NewFollowerDistributor() @@ -328,7 +325,7 @@ func main() { Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer for updating the protocol // state when the follower detects newly finalized blocks - finalizer := confinalizer.NewFinalizer(node.DB, node.Storage.Headers, followerState, node.Tracer) + finalizer := confinalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, followerState, node.Tracer) finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers) if err != nil { return nil, fmt.Errorf("could not find latest finalized block and pending blocks to recover consensus follower: %w", err) @@ -460,7 +457,7 @@ func main() { collectionRequestQueue := queue.NewHeroStore(maxCollectionRequestCacheSize, node.Logger, collectionRequestMetrics) return provider.New( - node.Logger.With().Str("engine", "collection_provider").Logger(), + node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, node.EngineRegistry, node.Me, @@ -489,7 +486,7 @@ func main() { // Epoch manager encapsulates and manages epoch-dependent engines as we // transition between epochs Component("epoch manager", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - clusterStateFactory, err := factories.NewClusterStateFactory(node.DB, node.Metrics.Cache, node.Tracer) + clusterStateFactory, err := factories.NewClusterStateFactory(node.ProtocolDB, node.StorageLockMgr, node.Metrics.Cache, node.Tracer) if err != nil { return nil, err } @@ -502,8 +499,9 @@ func main() { } builderFactory, err := factories.NewBuilderFactory( - node.DB, + node.ProtocolDB, node.State, + node.StorageLockMgr, node.Storage.Headers, node.Tracer, colMetrics, diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index df3fedb04e3..b1c422975f8 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -65,10 +65,10 @@ import ( "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/events/gadgets" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" "github.com/onflow/flow-go/utils/io" ) @@ -201,16 +201,7 @@ func main() { nodeBuilder. PreInit(cmd.DynamicStartPreInit). - ValidateRootSnapshot(badgerState.ValidRootSnapshotContainsEntityExpiryRange). - PostInit(func(nodeConfig *cmd.NodeConfig) error { - // TODO(EFM, #6794): This function is introduced to implement a backward-compatible upgrade from v1 to v2. - // Remove this once we complete the network upgrade. - log := nodeConfig.Logger.With().Str("postinit", "dkg_end_state_migration").Logger() - if err := operation.RetryOnConflict(nodeBuilder.SecretsDB.Update, operation.MigrateDKGEndStateFromV1(log)); err != nil { - return fmt.Errorf("could not migrate DKG end state from v1 to v2: %w", err) - } - return nil - }). + ValidateRootSnapshot(datastore.ValidRootSnapshotContainsEntityExpiryRange). Module("machine account config", func(node *cmd.NodeConfig) error { machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID) return err @@ -501,7 +492,7 @@ func main() { }). 
Component("matching engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { receiptRequester, err = requester.New( - node.Logger, + node.Logger.With().Str("entity", "receipt").Logger(), node.Metrics.Engine, node.EngineRegistry, node.Me, @@ -587,7 +578,7 @@ func main() { Component("hotstuff modules", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // initialize the block finalizer finalize := finalizer.NewFinalizer( - node.DB, + node.ProtocolDB.Reader(), node.Storage.Headers, mutableState, node.Tracer, @@ -750,6 +741,7 @@ func main() { return ctl, nil }). Component("consensus participant", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // create different epochs setups mutableProtocolState := protocol_state.NewMutableProtocolState( node.Logger, node.Storage.EpochProtocolStateEntries, diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index 3460f963997..0704a10449b 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -59,7 +59,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { log := nodeConfig.Logger.With().Str("component", "dynamic-startup").Logger() // CASE 1: The state is already bootstrapped - nothing to do - isBootstrapped, err := badgerstate.IsBootstrapped(nodeConfig.DB) + isBootstrapped, err := badgerstate.IsBootstrapped(nodeConfig.ProtocolDB) if err != nil { return fmt.Errorf("could not check if state is boostrapped: %w", err) } diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 70a6a712dbe..a216af43e9c 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -13,10 +13,9 @@ import ( awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/ipfs/boxo/bitswap" "github.com/ipfs/go-cid" - badgerds "github.com/ipfs/go-ds-badger2" "github.com/onflow/cadence" "github.com/onflow/flow-core-contracts/lib/go/templates" "github.com/rs/zerolog" @@ -78,6 +77,8 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/executiondatasync/pruner" + edstorage "github.com/onflow/flow-go/module/executiondatasync/storage" + execdatastorage "github.com/onflow/flow-go/module/executiondatasync/storage" "github.com/onflow/flow-go/module/executiondatasync/tracker" "github.com/onflow/flow-go/module/finalizedreader" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" @@ -168,7 +169,7 @@ type ExecutionNode struct { executionDataStore execution_data.ExecutionDataStore toTriggerCheckpoint *atomic.Bool // create the checkpoint trigger to be controlled by admin tool, and listened by the compactor stopControl *stop.StopControl // stop the node at given block height - executionDataDatastore *badgerds.Datastore + executionDataDatastore execdatastorage.DatastoreManager executionDataPruner *pruner.Pruner executionDataBlobstore blobs.Blobstore executionDataTracker tracker.Storage @@ -417,7 +418,7 @@ func (exeNode *ExecutionNode) LoadBlobService( if node.ObserverMode { edsChannel = channels.PublicExecutionDataService } - bs, err := node.EngineRegistry.RegisterBlobService(edsChannel, exeNode.executionDataDatastore, opts...) + bs, err := node.EngineRegistry.RegisterBlobService(edsChannel, exeNode.executionDataDatastore.Datastore(), opts...) 
if err != nil { return nil, fmt.Errorf("failed to register blob service: %w", err) } @@ -710,19 +711,14 @@ func (exeNode *ExecutionNode) LoadAuthorizationCheckingFunction( func (exeNode *ExecutionNode) LoadExecutionDataDatastore( node *NodeConfig, -) error { - datastoreDir := filepath.Join(exeNode.exeConf.executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) +) (err error) { + exeNode.executionDataDatastore, err = edstorage.CreateDatastoreManager( + node.Logger, exeNode.exeConf.executionDataDir, exeNode.exeConf.executionDataDBMode) if err != nil { - return err + return fmt.Errorf("could not create execution data datastore manager: %w", err) } - dsOpts := &badgerds.DefaultOptions - ds, err := badgerds.NewDatastore(datastoreDir, dsOpts) - if err != nil { - return err - } - exeNode.executionDataDatastore = ds - exeNode.builder.ShutdownFunc(ds.Close) + + exeNode.builder.ShutdownFunc(exeNode.executionDataDatastore.Close) return nil } @@ -733,7 +729,7 @@ func (exeNode *ExecutionNode) LoadBlobservicePeerManagerDependencies(node *NodeC } func (exeNode *ExecutionNode) LoadExecutionDataGetter(node *NodeConfig) error { - exeNode.executionDataBlobstore = blobs.NewBlobstore(exeNode.executionDataDatastore) + exeNode.executionDataBlobstore = blobs.NewBlobstore(exeNode.executionDataDatastore.Datastore()) exeNode.executionDataStore = execution_data.NewExecutionDataStore(exeNode.executionDataBlobstore, execution_data.DefaultSerializer) return nil } @@ -745,7 +741,7 @@ func (exeNode *ExecutionNode) LoadExecutionState( error, ) { - chunkDataPackDB, err := storagepebble.OpenDefaultPebbleDB( + chunkDataPackDB, err := storagepebble.SafeOpen( node.Logger.With().Str("pebbledb", "cdp").Logger(), exeNode.exeConf.chunkDataPackDir, ) @@ -791,6 +787,7 @@ func (exeNode *ExecutionNode) LoadExecutionState( node.Tracer, exeNode.registerStore, exeNode.exeConf.enableStorehouse, + node.StorageLockMgr, ) height, _, err := exeNode.executionState.GetLastExecutedBlockID(context.Background()) @@ -1094,7 +1091,7 @@ func (exeNode *ExecutionNode) LoadIngestionEngine( colFetcher = accessFetcher exeNode.collectionRequester = accessFetcher } else { - reqEng, err := requester.New(node.Logger, node.Metrics.Engine, node.EngineRegistry, node.Me, node.State, + reqEng, err := requester.New(node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, node.EngineRegistry, node.Me, node.State, channels.RequestCollections, filter.Any, func() flow.Entity { return &flow.Collection{} }, @@ -1193,7 +1190,7 @@ func (exeNode *ExecutionNode) LoadFollowerCore( ) { // create a finalizer that handles updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, exeNode.followerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, exeNode.followerState, node.Tracer) finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers) if err != nil { @@ -1302,7 +1299,7 @@ func (exeNode *ExecutionNode) LoadReceiptProviderEngine( engineRegister = &underlay.NoopEngineRegister{} } eng, err := provider.New( - node.Logger.With().Str("engine", "receipt_provider").Logger(), + node.Logger.With().Str("entity", "receipt").Logger(), node.Metrics.Engine, engineRegister, node.Me, @@ -1383,11 +1380,13 @@ func (exeNode *ExecutionNode) LoadBootstrapper(node *NodeConfig) error { // in order to support switching from badger to pebble in the middle of the spork, // we will check if the execution 
database has been bootstrapped by reading the state from badger db. // and if not, bootstrap both badger and pebble db. - commit, bootstrapped, err := bootstrapper.IsBootstrapped(badgerimpl.ToDB(node.DB)) + commit, bootstrapped, err := bootstrapper.IsBootstrapped(node.ProtocolDB) if err != nil { return fmt.Errorf("could not query database to know whether database has been bootstrapped: %w", err) } + node.Logger.Info().Msgf("execution database bootstrapped: %v, commit: %v", bootstrapped, commit) + // if the execution database does not exist, then we need to bootstrap the execution database. if !bootstrapped { @@ -1408,12 +1407,12 @@ func (exeNode *ExecutionNode) LoadBootstrapper(node *NodeConfig) error { return fmt.Errorf("could not load bootstrap state from checkpoint file: %w", err) } - err = bootstrapper.BootstrapExecutionDatabase(badgerimpl.ToDB(node.DB), node.RootSeal) + err = bootstrapper.BootstrapExecutionDatabase(node.StorageLockMgr, badgerimpl.ToDB(node.DB), node.RootSeal) if err != nil { return fmt.Errorf("could not bootstrap execution database: %w", err) } - err = bootstrapper.BootstrapExecutionDatabase(pebbleimpl.ToDB(node.PebbleDB), node.RootSeal) + err = bootstrapper.BootstrapExecutionDatabase(node.StorageLockMgr, pebbleimpl.ToDB(node.PebbleDB), node.RootSeal) if err != nil { return fmt.Errorf("could not bootstrap execution database: %w", err) } diff --git a/cmd/execution_config.go b/cmd/execution_config.go index cc2c3c8852b..b469b00b6eb 100644 --- a/cmd/execution_config.go +++ b/cmd/execution_config.go @@ -14,14 +14,15 @@ import ( exepruner "github.com/onflow/flow-go/engine/execution/pruner" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/engine/execution/computation" "github.com/onflow/flow-go/engine/execution/ingestion/stop" "github.com/onflow/flow-go/engine/execution/rpc" "github.com/onflow/flow-go/fvm/storage/derived" - storage "github.com/onflow/flow-go/storage/badger" ) // ExecutionConfig contains the configs for starting up execution nodes @@ -58,6 +59,7 @@ type ExecutionConfig struct { importCheckpointWorkerCount int transactionExecutionMetricsEnabled bool transactionExecutionMetricsBufferSize uint + executionDataDBMode string computationConfig computation.ComputationConfig receiptRequestWorkers uint // common provider engine workers @@ -97,7 +99,7 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.IntVar(&exeConf.computationConfig.MaxConcurrency, "computer-max-concurrency", 1, "set to greater than 1 to enable concurrent transaction execution") flags.StringVar(&exeConf.chunkDataPackDir, "chunk-data-pack-dir", filepath.Join(datadir, "chunk_data_packs"), "directory to use for storing chunk data packs") flags.StringVar(&exeConf.chunkDataPackCheckpointsDir, "chunk-data-pack-checkpoints-dir", filepath.Join(datadir, "chunk_data_packs_checkpoints_dir"), "directory to use storing chunk data packs pebble database checkpoints for querying while the node is running") - flags.UintVar(&exeConf.chunkDataPackCacheSize, "chdp-cache", storage.DefaultCacheSize, "cache size for chunk data packs") + flags.UintVar(&exeConf.chunkDataPackCacheSize, "chdp-cache", store.DefaultCacheSize, "cache size for chunk data packs") flags.Uint32Var(&exeConf.chunkDataPackRequestsCacheSize, "chdp-request-queue", 
mempool.DefaultChunkDataPackRequestQueueSize, "queue size for chunk data pack requests") flags.DurationVar(&exeConf.requestInterval, "request-interval", 60*time.Second, "the interval between requests for the requester engine") flags.Uint32Var(&exeConf.receiptRequestsCacheSize, "receipt-request-cache", provider.DefaultEntityRequestCacheSize, "queue size for entity requests at common provider engine") @@ -129,6 +131,10 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.IntVar(&exeConf.importCheckpointWorkerCount, "import-checkpoint-worker-count", 10, "number of workers to import checkpoint file during bootstrap") flags.BoolVar(&exeConf.transactionExecutionMetricsEnabled, "tx-execution-metrics", true, "enable collection of transaction execution metrics") flags.UintVar(&exeConf.transactionExecutionMetricsBufferSize, "tx-execution-metrics-buffer-size", 200, "buffer size for transaction execution metrics. The buffer size is the number of blocks that are kept in memory by the metrics provider engine") + flags.StringVar(&exeConf.executionDataDBMode, + "execution-data-db", + execution_data.ExecutionDataDBModeBadger.String(), + "[experimental] the DB type for execution datastore. One of [badger, pebble]") flags.BoolVar(&exeConf.onflowOnlyLNs, "temp-onflow-only-lns", false, "do not use unless required. forces node to only request collections from onflow collection nodes") flags.BoolVar(&exeConf.enableStorehouse, "enable-storehouse", false, "enable storehouse to store registers on disk, default is false") diff --git a/cmd/node_builder.go b/cmd/node_builder.go index 479d14a1ac0..3a58888a060 100644 --- a/cmd/node_builder.go +++ b/cmd/node_builder.go @@ -4,8 +4,9 @@ import ( "context" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" madns "github.com/multiformats/go-multiaddr-dns" "github.com/onflow/crypto" "github.com/prometheus/client_golang/prometheus" @@ -189,6 +190,8 @@ type BaseConfig struct { // BitswapReprovideEnabled configures whether the Bitswap reprovide mechanism is enabled. // This is only meaningful to Access and Execution nodes. BitswapReprovideEnabled bool + + TransactionFeesDisabled bool } // NodeConfig contains all the derived parameters such the NodeID, private keys etc. 
and initialized instances of @@ -209,6 +212,7 @@ type NodeConfig struct { ProtocolDB storage.DB SecretsDB *badger.DB Storage Storage + StorageLockMgr lockctx.Manager ProtocolEvents *events.Distributor State protocol.State Resolver madns.BasicResolver @@ -280,7 +284,7 @@ func DefaultBaseConfig() *BaseConfig { BootstrapDir: "bootstrap", datadir: datadir, pebbleDir: pebbleDir, - DBOps: string(dbops.BadgerTransaction), // "badger-transaction" (default) or "batch-update" + DBOps: string(dbops.BadgerBatch), // "badger-batch" (default) or "pebble-batch" badgerDB: nil, pebbleDB: nil, secretsdir: NotSet, diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index f82e83927b6..cbc18ad3264 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -46,6 +46,9 @@ import ( "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" rpcConnection "github.com/onflow/flow-go/engine/access/rpc/connection" "github.com/onflow/flow-go/engine/access/state_stream" statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" @@ -187,12 +190,12 @@ func DefaultObserverServiceConfig() *ObserverServiceConfig { CollectionClientTimeout: 3 * time.Second, ExecutionClientTimeout: 3 * time.Second, ConnectionPoolSize: backend.DefaultConnectionPoolSize, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, PreferredExecutionNodeIDs: nil, FixedExecutionNodeIDs: nil, - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now - EventQueryMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now }, RestConfig: rest.Config{ ListenAddress: "", @@ -462,7 +465,7 @@ func (builder *ObserverServiceBuilder) buildFollowerCore() *ObserverServiceBuild builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) followerCore, err := consensus.NewFollower( node.Logger, @@ -1448,6 +1451,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS builder.RootChainID.Chain(), indexerDerivedChainData, collectionExecutedMetric, + node.StorageLockMgr, ) if err != nil { return nil, err @@ -1534,7 +1538,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS } broadcaster := engine.NewBroadcaster() - eventQueryMode, err := 
backend.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode) + eventQueryMode, err := query_mode.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode) if err != nil { return nil, fmt.Errorf("could not parse event query mode: %w", err) } @@ -1542,7 +1546,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS // use the events index for events if enabled and the node is configured to use it for // regular event queries useIndex := builder.executionDataIndexingEnabled && - eventQueryMode != backend.IndexQueryModeExecutionNodesOnly + eventQueryMode != query_mode.IndexQueryModeExecutionNodesOnly executionDataTracker := subscriptiontracker.NewExecutionDataTracker( builder.Logger, @@ -1929,6 +1933,27 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { return nil, fmt.Errorf("failed to convert node id string to Flow Identifier for fixed EN map: %w", err) } + scriptExecMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.ScriptExecutionMode) + if err != nil { + return nil, fmt.Errorf("could not parse script execution mode: %w", err) + } + + eventQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.EventQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse event query mode: %w", err) + } + if eventQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("event query mode 'compare' is not supported") + } + + txResultQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.TxResultQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse transaction result query mode: %w", err) + } + if txResultQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("transaction result query mode 'compare' is not supported") + } + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( node.Logger, node.State, @@ -1952,8 +1977,11 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { MaxHeightRange: backendConfig.MaxHeightRange, Log: node.Logger, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled), + Communicator: node_communicator.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled), BlockTracker: blockTracker, + ScriptExecutionMode: scriptExecMode, + EventQueryMode: eventQueryMode, + TxResultQueryMode: txResultQueryMode, SubscriptionHandler: subscription.NewSubscriptionHandler( builder.Logger, broadcaster, @@ -1967,8 +1995,8 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { } if builder.localServiceAPIEnabled { - backendParams.ScriptExecutionMode = backend.IndexQueryModeLocalOnly - backendParams.EventQueryMode = backend.IndexQueryModeLocalOnly + backendParams.ScriptExecutionMode = query_mode.IndexQueryModeLocalOnly + backendParams.EventQueryMode = query_mode.IndexQueryModeLocalOnly backendParams.TxResultsIndex = builder.TxResultsIndex backendParams.EventsIndex = builder.EventsIndex backendParams.ScriptExecutor = builder.ScriptExecutor diff --git a/cmd/scaffold.go b/cmd/scaffold.go index dc1950cd92a..523b0390048 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -12,7 +12,7 @@ import ( "time" gcemd "cloud.google.com/go/compute/metadata" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/hashicorp/go-multierror" dht "github.com/libp2p/go-libp2p-kad-dht" @@ -78,6 +78,7 @@ import ( "github.com/onflow/flow-go/network/underlay" 
"github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/storage" @@ -276,6 +277,13 @@ func (fnb *FlowNodeBuilder) BaseFlags() { "observer-mode-bootstrap-node-addresses", []string{}, "the network addresses of the bootstrap access node if this is an observer e.g. access-001.mainnet.flow.org:9653,access-002.mainnet.flow.org:9653") + + // TransactionFeesDisabled is a temporary convenience flag for easier testing of cadence compiler changes. This option should not be used if we need to disable fees on a network. + // To disable fees on a network, we need to set the fee price to 0.0. + fnb.flags.BoolVar(&fnb.TransactionFeesDisabled, + "disable-fees", + false, + "Disables calling the transaction fee deduction. This is only for testing purposes. To disable fees on a network it is better to set the fee price to 0.0 .") } func (fnb *FlowNodeBuilder) EnqueuePingService() { @@ -1224,6 +1232,18 @@ func (fnb *FlowNodeBuilder) initSecretsDB() error { return nil } +// initStorageLockManager initializes the lock manager used by the storage layer. +// This manager must be a process-wide singleton. +func (fnb *FlowNodeBuilder) initStorageLockManager() error { + if fnb.StorageLockMgr != nil { + fnb.Logger.Warn().Msgf("storage lock manager already initialized, skipping re-initialization, this should only happen in test case") + return nil + } + + fnb.StorageLockMgr = storage.MakeSingletonLockManager() + return nil +} + func (fnb *FlowNodeBuilder) initStorage() error { // in order to void long iterations with big keys when initializing with an @@ -1236,24 +1256,24 @@ func (fnb *FlowNodeBuilder) initStorage() error { return fmt.Errorf("could not initialize max tracker: %w", err) } - headers := bstorage.NewHeaders(fnb.Metrics.Cache, fnb.DB) - guarantees := bstorage.NewGuarantees(fnb.Metrics.Cache, fnb.DB, fnb.BaseConfig.guaranteesCacheSize) - seals := bstorage.NewSeals(fnb.Metrics.Cache, fnb.DB) - results := bstorage.NewExecutionResults(fnb.Metrics.Cache, fnb.DB) - receipts := bstorage.NewExecutionReceipts(fnb.Metrics.Cache, fnb.DB, results, fnb.BaseConfig.receiptsCacheSize) - index := bstorage.NewIndex(fnb.Metrics.Cache, fnb.DB) - payloads := bstorage.NewPayloads(fnb.DB, index, guarantees, seals, receipts, results) - blocks := bstorage.NewBlocks(fnb.DB, headers, payloads) - qcs := bstorage.NewQuorumCertificates(fnb.Metrics.Cache, fnb.DB, bstorage.DefaultCacheSize) - transactions := bstorage.NewTransactions(fnb.Metrics.Cache, fnb.DB) - collections := bstorage.NewCollections(fnb.DB, transactions) - setups := bstorage.NewEpochSetups(fnb.Metrics.Cache, fnb.DB) - epochCommits := bstorage.NewEpochCommits(fnb.Metrics.Cache, fnb.DB) - protocolState := bstorage.NewEpochProtocolStateEntries(fnb.Metrics.Cache, setups, epochCommits, fnb.DB, - bstorage.DefaultEpochProtocolStateCacheSize, bstorage.DefaultProtocolStateIndexCacheSize) - protocolKVStores := bstorage.NewProtocolKVStore(fnb.Metrics.Cache, fnb.DB, - bstorage.DefaultProtocolKVStoreCacheSize, bstorage.DefaultProtocolKVStoreByBlockIDCacheSize) - versionBeacons := store.NewVersionBeacons(badgerimpl.ToDB(fnb.DB)) + headers := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB) + guarantees := store.NewGuarantees(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.BaseConfig.guaranteesCacheSize) + seals := store.NewSeals(fnb.Metrics.Cache, 
fnb.ProtocolDB) + results := store.NewExecutionResults(fnb.Metrics.Cache, fnb.ProtocolDB) + receipts := store.NewExecutionReceipts(fnb.Metrics.Cache, fnb.ProtocolDB, results, fnb.BaseConfig.receiptsCacheSize) + index := store.NewIndex(fnb.Metrics.Cache, fnb.ProtocolDB) + payloads := store.NewPayloads(fnb.ProtocolDB, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(fnb.ProtocolDB, headers, payloads) + qcs := store.NewQuorumCertificates(fnb.Metrics.Cache, fnb.ProtocolDB, store.DefaultCacheSize) + transactions := store.NewTransactions(fnb.Metrics.Cache, fnb.ProtocolDB) + collections := store.NewCollections(fnb.ProtocolDB, transactions) + setups := store.NewEpochSetups(fnb.Metrics.Cache, fnb.ProtocolDB) + epochCommits := store.NewEpochCommits(fnb.Metrics.Cache, fnb.ProtocolDB) + protocolState := store.NewEpochProtocolStateEntries(fnb.Metrics.Cache, setups, epochCommits, fnb.ProtocolDB, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStores := store.NewProtocolKVStore(fnb.Metrics.Cache, fnb.ProtocolDB, + store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := store.NewVersionBeacons(fnb.ProtocolDB) fnb.Storage = Storage{ Headers: headers, @@ -1335,7 +1355,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() { node.SyncEngineIdentifierProvider = id.NewIdentityFilterIdentifierProvider( filter.And( - filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.HasRole[flow.Identity](flow.RoleExecution), filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), filter.NotEjectedFilter, ), @@ -1348,7 +1368,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() { func (fnb *FlowNodeBuilder) initState() error { fnb.ProtocolEvents = events.NewDistributor() - isBootStrapped, err := badgerState.IsBootstrapped(fnb.DB) + isBootStrapped, err := badgerState.IsBootstrapped(fnb.ProtocolDB) if err != nil { return fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) } @@ -1357,7 +1377,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.Logger.Info().Msg("opening already bootstrapped protocol state") state, err := badgerState.OpenState( fnb.Metrics.Compliance, - fnb.DB, + fnb.ProtocolDB, + fnb.StorageLockMgr, fnb.Storage.Headers, fnb.Storage.Seals, fnb.Storage.Results, @@ -1406,7 +1427,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.State, err = badgerState.Bootstrap( fnb.Metrics.Compliance, - fnb.DB, + fnb.ProtocolDB, + fnb.StorageLockMgr, fnb.Storage.Headers, fnb.Storage.Seals, fnb.Storage.Results, @@ -1471,7 +1493,7 @@ func (fnb *FlowNodeBuilder) setRootSnapshot(rootSnapshot protocol.Snapshot) erro var err error // validate the root snapshot QCs - err = badgerState.IsValidRootSnapshotQCs(rootSnapshot) + err = datastore.IsValidRootSnapshotQCs(rootSnapshot) if err != nil { return fmt.Errorf("failed to validate root snapshot QCs: %w", err) } @@ -1588,7 +1610,9 @@ func (fnb *FlowNodeBuilder) initLocal() error { func (fnb *FlowNodeBuilder) initFvmOptions() { fnb.FvmOptions = initialize.InitFvmOptions( - fnb.RootChainID, fnb.Storage.Headers, + fnb.RootChainID, + fnb.Storage.Headers, + fnb.BaseConfig.TransactionFeesDisabled, ) } @@ -2138,6 +2162,10 @@ func (fnb *FlowNodeBuilder) onStart() error { return err } + if err := fnb.initStorageLockManager(); err != nil { + return err + } + // we always initialize both badger and pebble databases // even if we only use one of them, this simplify the code and checks if err := fnb.initBadgerDB(); err != nil { 
diff --git a/cmd/scaffold/pebble_db.go b/cmd/scaffold/pebble_db.go index 100cc7e72e3..e620c467d86 100644 --- a/cmd/scaffold/pebble_db.go +++ b/cmd/scaffold/pebble_db.go @@ -5,7 +5,7 @@ import ( "io" "os" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" pebblestorage "github.com/onflow/flow-go/storage/pebble" @@ -26,7 +26,7 @@ func InitPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, io.Closer, err return nil, nil, fmt.Errorf("could not create pebble db (path: %s): %w", dir, err) } - db, err := pebblestorage.OpenDefaultPebbleDB(logger, dir) + db, err := pebblestorage.SafeOpen(logger, dir) if err != nil { return nil, nil, fmt.Errorf("could not open newly created pebble db (path: %s): %w", dir, err) } diff --git a/cmd/util/cmd/check-storage/evm_account_storage_health_test.go b/cmd/util/cmd/check-storage/evm_account_storage_health_test.go index 59cbe2163c5..aa8cf3a3f1f 100644 --- a/cmd/util/cmd/check-storage/evm_account_storage_health_test.go +++ b/cmd/util/cmd/check-storage/evm_account_storage_health_test.go @@ -8,11 +8,11 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/maps" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/atree" "github.com/onflow/cadence/common" "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/cmd/util/ledger/util/registers" diff --git a/cmd/util/cmd/common/flags.go b/cmd/util/cmd/common/flags.go index 9562bfa5b09..ac84464e101 100644 --- a/cmd/util/cmd/common/flags.go +++ b/cmd/util/cmd/common/flags.go @@ -31,7 +31,7 @@ func InitWithDBFlags(cmd *cobra.Command) { cmd.PersistentFlags().StringVar(&flagPebbleDir, "pebble-dir", DefaultPebbleDir, "directory to the pebble dababase") - cmd.PersistentFlags().StringVar(&flagUseDB, "use-db", DefaultDB, "the database type to use, --badger or --pebble") + cmd.PersistentFlags().StringVar(&flagUseDB, "use-db", DefaultDB, "the database type to use, badger or pebble") } // ReadDBFlags is to read the database flags diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go index 33522aa70a9..b5f1254cf87 100644 --- a/cmd/util/cmd/common/node_info.go +++ b/cmd/util/cmd/common/node_info.go @@ -64,6 +64,7 @@ func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNod weight, partner.NetworkPubKey.PublicKey, partner.StakingPubKey.PublicKey, + partner.StakingPoP.Signature, ) nodes = append(nodes, node) } @@ -136,7 +137,7 @@ func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, inte log.Info().Msgf("read %d weights for internal nodes", len(weights)) var nodes []bootstrap.NodeInfo - for _, internal := range privInternals { + for i, internal := range privInternals { // check if address is valid format ValidateAddressFormat(log, internal.Address) @@ -154,14 +155,17 @@ func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, inte log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) } - node := bootstrap.NewPrivateNodeInfo( + node, err := bootstrap.NewPrivateNodeInfo( internal.NodeID, internal.Role, internal.Address, weight, - internal.NetworkPrivKey, - internal.StakingPrivKey, + internal.NetworkPrivKey.PrivateKey, + internal.StakingPrivKey.PrivateKey, ) + if err != nil { + return nil, fmt.Errorf("failed to build private node info at index %d: %w", 
i, err) + } nodes = append(nodes, node) } diff --git a/cmd/util/cmd/common/state.go b/cmd/util/cmd/common/state.go index 3ca3c82aa54..859c4834666 100644 --- a/cmd/util/cmd/common/state.go +++ b/cmd/util/cmd/common/state.go @@ -3,26 +3,28 @@ package common import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/state/protocol" protocolbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) -func InitProtocolState(db *badger.DB, storages *storage.All) (protocol.State, error) { +func OpenProtocolState(lockManager lockctx.Manager, db storage.DB, storages *store.All) (protocol.State, error) { metrics := &metrics.NoopCollector{} protocolState, err := protocolbadger.OpenState( metrics, db, + lockManager, storages.Headers, storages.Seals, storages.Results, storages.Blocks, storages.QuorumCertificates, - storages.Setups, + storages.EpochSetups, storages.EpochCommits, storages.EpochProtocolStateEntries, storages.ProtocolKVStore, @@ -30,7 +32,7 @@ func InitProtocolState(db *badger.DB, storages *storage.All) (protocol.State, er ) if err != nil { - return nil, fmt.Errorf("could not init protocol state: %w", err) + return nil, fmt.Errorf("could not open protocol state: %w", err) } return protocolState, nil diff --git a/cmd/util/cmd/common/storage.go b/cmd/util/cmd/common/storage.go index 8852eeaea71..894d36ac6f7 100644 --- a/cmd/util/cmd/common/storage.go +++ b/cmd/util/cmd/common/storage.go @@ -3,7 +3,7 @@ package common import ( "fmt" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog/log" @@ -107,10 +107,36 @@ func InitBadgerStorage(flags DBFlags) (*badger.DB, error) { return nil, fmt.Errorf("only badger db is supported, got: %s", datadir.UseDB) } - return InitStorage(datadir.DBDir), nil + return InitBadgerDBStorage(datadir.DBDir), nil } -func InitStorage(datadir string) *badger.DB { +func InitStorage(datadir string) (storage.DB, error) { + ok, err := IsBadgerFolder(datadir) + if err != nil { + return nil, err + } + if ok { + log.Info().Msgf("using badger db at %s", datadir) + return badgerimpl.ToDB(InitBadgerDBStorage(datadir)), nil + } + + ok, err = IsPebbleFolder(datadir) + if err != nil { + return nil, err + } + if ok { + db, err := pebblestorage.ShouldOpenDefaultPebbleDB(log.Logger, datadir) + if err != nil { + return nil, fmt.Errorf("could not open pebble db at %s: %w", datadir, err) + } + log.Info().Msgf("using pebble db at %s", datadir) + return pebbleimpl.ToDB(db), nil + } + + return nil, fmt.Errorf("could not determine storage type (not badger, nor pebble) for directory %s", datadir) +} + +func InitBadgerDBStorage(datadir string) *badger.DB { return InitStorageWithTruncate(datadir, false) } @@ -121,7 +147,7 @@ func InitStorageWithTruncate(datadir string, truncate bool) *badger.DB { WithLogger(nil). WithTruncate(truncate) - db, err := badger.Open(opts) + db, err := storagebadger.SafeOpen(opts) if err != nil { log.Fatal().Err(err).Msg("could not open key-value store") } @@ -139,7 +165,26 @@ func InitStorageWithTruncate(datadir string, truncate bool) *badger.DB { return db } -func InitStorages(db *badger.DB) *storage.All { +// IsBadgerFolder checks if the given directory is a badger folder. +// It returns error if the folder is empty or not exists. +// it returns error if the folder is not empty, but misses some required badger files. 
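With `common.InitStorage` now returning a `storage.DB` and auto-detecting whether the data directory holds a Badger or a Pebble database, utility commands can open either flavour through a single call. A short usage sketch matching how the block exporter further below uses it (the surrounding error-returning function is assumed):

```go
// Open whichever database lives at dataDir (Badger vs Pebble is detected
// from the folder contents) and build the store-backed storages on top of it.
db, err := common.InitStorage(dataDir)
if err != nil {
	return fmt.Errorf("could not initialize storage: %w", err)
}
defer db.Close()

storages := common.InitStorages(db) // *store.All: Headers, Blocks, Commits, ...
```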
+func IsBadgerFolder(dataDir string) (bool, error) { + return storagebadger.IsBadgerFolder(dataDir) +} + +// IsPebbleFolder checks if the given directory is a pebble folder. +// It returns error if the folder is empty or not exists. +// it returns error if the folder is not empty, but misses some required pebble files. +func IsPebbleFolder(dataDir string) (bool, error) { + return pebblestorage.IsPebbleFolder(dataDir) +} + +func InitStorages(db storage.DB) *store.All { + metrics := &metrics.NoopCollector{} + return store.InitAll(metrics, db) +} + +func InitBadgerStorages(db *badger.DB) *storage.All { metrics := &metrics.NoopCollector{} return storagebadger.InitAll(metrics, db) @@ -174,7 +219,7 @@ func WithStorage(flags DBFlags, f func(storage.DB) error) error { log.Info().Msgf("using %s db at %s", usedDir.UseDB, usedDir.DBDir) if usedDir.UseDB == UsedDBPebble { - db, err := pebblestorage.MustOpenDefaultPebbleDB(log.Logger, usedDir.DBDir) + db, err := pebblestorage.ShouldOpenDefaultPebbleDB(log.Logger, usedDir.DBDir) if err != nil { return fmt.Errorf("can not open pebble db at %v: %w", usedDir.DBDir, err) } @@ -185,7 +230,7 @@ func WithStorage(flags DBFlags, f func(storage.DB) error) error { } if usedDir.UseDB == UsedDBBadger { - db := InitStorage(usedDir.DBDir) + db := InitBadgerDBStorage(usedDir.DBDir) defer db.Close() return f(badgerimpl.ToDB(db)) @@ -196,13 +241,13 @@ func WithStorage(flags DBFlags, f func(storage.DB) error) error { // InitBadgerAndPebble initializes the badger and pebble storages func InitBadgerAndPebble(dirs TwoDBDirs) (bdb *badger.DB, pdb *pebble.DB, err error) { - pdb, err = pebblestorage.MustOpenDefaultPebbleDB( + pdb, err = pebblestorage.ShouldOpenDefaultPebbleDB( log.Logger.With().Str("pebbledb", "protocol").Logger(), dirs.PebbleDir) if err != nil { return nil, nil, err } - bdb = InitStorage(dirs.BadgerDir) + bdb = InitBadgerDBStorage(dirs.BadgerDir) return bdb, pdb, nil } diff --git a/cmd/util/cmd/db-migration/cmd.go b/cmd/util/cmd/db-migration/cmd.go new file mode 100644 index 00000000000..28a4caf28ba --- /dev/null +++ b/cmd/util/cmd/db-migration/cmd.go @@ -0,0 +1,93 @@ +package db + +import ( + "fmt" + "time" + + "github.com/docker/go-units" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/storage/migration" +) + +var ( + flagBadgerDBdir string + flagPebbleDBdir string + flagBatchByteSize int + flagReaderCount int + flagWriterCount int + flagReaderShardPrefixBytes int + flagValidationMode string + flagValidationOnly bool +) + +var Cmd = &cobra.Command{ + Use: "db-migration", + Short: "copy badger db to pebble db", + RunE: run, +} + +func init() { + Cmd.Flags().StringVar(&flagBadgerDBdir, "datadir", "", "BadgerDB Dir to copy data from") + _ = Cmd.MarkFlagRequired("datadir") + + Cmd.Flags().StringVar(&flagPebbleDBdir, "pebbledir", "", "PebbleDB Dir to copy data to") + _ = Cmd.MarkFlagRequired("pebbledir") + + Cmd.Flags().IntVar(&flagBatchByteSize, "batch_byte_size", migration.DefaultMigrationConfig.BatchByteSize, + "the batch size in bytes to use for migration (32MB by default)") + + Cmd.Flags().IntVar(&flagReaderCount, "reader_count", migration.DefaultMigrationConfig.ReaderWorkerCount, + "the number of reader workers to use for migration") + + Cmd.Flags().IntVar(&flagWriterCount, "writer_count", migration.DefaultMigrationConfig.WriterWorkerCount, + "the number of writer workers to use for migration") + + Cmd.Flags().IntVar(&flagReaderShardPrefixBytes, "reader_shard_prefix_bytes", 
migration.DefaultMigrationConfig.ReaderShardPrefixBytes, + "the number of prefix bytes used to assign iterator workload") + + Cmd.Flags().StringVar(&flagValidationMode, "validation_mode", string(migration.DefaultMigrationConfig.ValidationMode), + "the validation mode to use for migration (partial or full, default is partial)") + + Cmd.Flags().BoolVar(&flagValidationOnly, "validation_only", false, + "if set, only validate the data in the badger db without copying it to pebble db. "+ + "Note: this will not copy any data to pebble db, and will not create any pebble db files.") +} + +func run(*cobra.Command, []string) error { + lg := log.With(). + Str("badger_db_dir", flagBadgerDBdir). + Str("pebble_db_dir", flagPebbleDBdir). + Str("batch_byte_size", units.HumanSize(float64(flagBatchByteSize))). + Int("reader_count", flagReaderCount). + Int("writer_count", flagWriterCount). + Int("reader_shard_prefix_bytes", flagReaderShardPrefixBytes). + Str("validation_mode", flagValidationMode). + Bool("validation_only", flagValidationOnly). + Logger() + + validationMode, err := migration.ParseValidationModeValid(flagValidationMode) + if err != nil { + return fmt.Errorf("invalid validation mode: %w", err) + } + + lg.Info().Msgf("starting migration from badger db to pebble db") + start := time.Now() + err = migration.RunMigrationAndCompaction(flagBadgerDBdir, flagPebbleDBdir, migration.MigrationConfig{ + BatchByteSize: flagBatchByteSize, + ReaderWorkerCount: flagReaderCount, + WriterWorkerCount: flagWriterCount, + ReaderShardPrefixBytes: flagReaderShardPrefixBytes, + ValidationMode: validationMode, + ValidationOnly: flagValidationOnly, + }) + + if err != nil { + return fmt.Errorf("migration failed: %w", err) + } + + lg.Info().Msgf("migration completed successfully in %s", time.Since(start).String()) + + return nil +} diff --git a/cmd/util/cmd/exec-data-json-export/block_exporter.go b/cmd/util/cmd/exec-data-json-export/block_exporter.go index 27325b2d228..5dd23e6c0fd 100644 --- a/cmd/util/cmd/exec-data-json-export/block_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/block_exporter.go @@ -12,8 +12,6 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) @@ -36,21 +34,22 @@ type blockSummary struct { func ExportBlocks(blockID flow.Identifier, dbPath string, outputPath string) (flow.StateCommitment, error) { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) + db, err := common.InitStorage(dbPath) + if err != nil { + return flow.DummyStateCommitment, fmt.Errorf("could not initialize storage: %w", err) + } defer db.Close() - sdb := badgerimpl.ToDB(db) - cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - index := badger.NewIndex(cacheMetrics, db) - guarantees := badger.NewGuarantees(cacheMetrics, db, badger.DefaultCacheSize) - seals := badger.NewSeals(cacheMetrics, db) - results := badger.NewExecutionResults(cacheMetrics, db) - receipts := badger.NewExecutionReceipts(cacheMetrics, db, results, badger.DefaultCacheSize) - payloads := badger.NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := badger.NewBlocks(db, headers, payloads) - commits := store.NewCommits(&metrics.NoopCollector{}, sdb) + headers := store.NewHeaders(cacheMetrics, db) + index := 
store.NewIndex(cacheMetrics, db) + guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize) + seals := store.NewSeals(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + commits := store.NewCommits(&metrics.NoopCollector{}, db) activeBlockID := blockID outputFile := filepath.Join(outputPath, "blocks.jsonl") diff --git a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go index af8dc3ba898..f08e695e1e2 100644 --- a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go @@ -11,9 +11,8 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation" - "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" ) type dSnapshot struct { @@ -25,12 +24,14 @@ type dSnapshot struct { func ExportDeltaSnapshots(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) + db, err := common.InitStorage(dbPath) + if err != nil { + return fmt.Errorf("could not initialize storage: %w", err) + } defer db.Close() - sdb := badgerimpl.ToDB(db) cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) + headers := store.NewHeaders(cacheMetrics, db) activeBlockID := blockID outputFile := filepath.Join(outputPath, "delta.jsonl") @@ -52,7 +53,7 @@ func ExportDeltaSnapshots(blockID flow.Identifier, dbPath string, outputPath str } var snap []*snapshot.ExecutionSnapshot - err = operation.RetrieveExecutionStateInteractions(sdb.Reader(), activeBlockID, &snap) + err = operation.RetrieveExecutionStateInteractions(db.Reader(), activeBlockID, &snap) if err != nil { return fmt.Errorf("could not load delta snapshot: %w", err) } diff --git a/cmd/util/cmd/exec-data-json-export/event_exporter.go b/cmd/util/cmd/exec-data-json-export/event_exporter.go index b516b4d4d3c..c14c71536e9 100644 --- a/cmd/util/cmd/exec-data-json-export/event_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/event_exporter.go @@ -11,8 +11,6 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) @@ -30,14 +28,15 @@ type event struct { func ExportEvents(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) + db, err := common.InitStorage(dbPath) + if err != nil { + return fmt.Errorf("could not initialize storage: %w", err) + } defer db.Close() - sdb := badgerimpl.ToDB(db) - cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - events := store.NewEvents(cacheMetrics, sdb) + headers := store.NewHeaders(cacheMetrics, db) + events := store.NewEvents(cacheMetrics, db) activeBlockID := blockID 
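For orientation, the exporters touched above all follow the same wiring: `common.InitStorage` detects whether the data directory holds a Badger or a Pebble database and returns an engine-agnostic `storage.DB`, and every store is then constructed from that single handle. Below is a minimal sketch of the pattern, separate from the change itself; it uses only constructors that appear in this diff, while the package and function names are illustrative:

```go
package exportutil

import (
	"fmt"

	"github.com/onflow/flow-go/cmd/util/cmd/common"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/storage/store"
)

// openStores shows the shared wiring: one storage.DB handle, many stores.
func openStores(dbPath string) error {
	// InitStorage auto-detects Badger vs Pebble and returns a storage.DB.
	db, err := common.InitStorage(dbPath)
	if err != nil {
		return fmt.Errorf("could not initialize storage: %w", err)
	}
	defer db.Close()

	cacheMetrics := &metrics.NoopCollector{}
	headers := store.NewHeaders(cacheMetrics, db)
	results := store.NewExecutionResults(cacheMetrics, db)
	commits := store.NewCommits(cacheMetrics, db)

	// The stores would then be used to read block data for export.
	_, _, _ = headers, results, commits
	return nil
}
```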
outputFile := filepath.Join(outputPath, "events.jsonl") diff --git a/cmd/util/cmd/exec-data-json-export/result_exporter.go b/cmd/util/cmd/exec-data-json-export/result_exporter.go index df187a9aa87..074f5dcf778 100644 --- a/cmd/util/cmd/exec-data-json-export/result_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/result_exporter.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/store" ) type result struct { @@ -26,12 +26,15 @@ type result struct { func ExportResults(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) + db, err := common.InitStorage(dbPath) + if err != nil { + return fmt.Errorf("could not initialize storage: %w", err) + } defer db.Close() cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - results := badger.NewExecutionResults(cacheMetrics, db) + headers := store.NewHeaders(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) activeBlockID := blockID outputFile := filepath.Join(outputPath, "results.jsonl") diff --git a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go index fe3fb41519c..f2871a5e560 100644 --- a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/store" ) // TODO add status, events as repeated, gas used, ErrorMessage , register touches @@ -42,20 +42,23 @@ type transactionInContext struct { func ExportExecutedTransactions(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) + db, err := common.InitStorage(dbPath) + if err != nil { + return fmt.Errorf("could not initialize storage: %w", err) + } defer db.Close() cacheMetrics := &metrics.NoopCollector{} - index := badger.NewIndex(cacheMetrics, db) - guarantees := badger.NewGuarantees(cacheMetrics, db, badger.DefaultCacheSize) - seals := badger.NewSeals(cacheMetrics, db) - results := badger.NewExecutionResults(cacheMetrics, db) - receipts := badger.NewExecutionReceipts(cacheMetrics, db, results, badger.DefaultCacheSize) - transactions := badger.NewTransactions(cacheMetrics, db) - headers := badger.NewHeaders(cacheMetrics, db) - payloads := badger.NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := badger.NewBlocks(db, headers, payloads) - collections := badger.NewCollections(db, transactions) + index := store.NewIndex(cacheMetrics, db) + guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize) + seals := store.NewSeals(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) + transactions := store.NewTransactions(cacheMetrics, db) + headers := store.NewHeaders(cacheMetrics, db) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := 
store.NewBlocks(db, headers, payloads) + collections := store.NewCollections(db, transactions) activeBlockID := blockID outputFile := filepath.Join(outputPath, "transactions.jsonl") diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index 9ed5640f21d..314ed1c05ae 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -17,48 +17,34 @@ import ( "github.com/onflow/flow-go/cmd/util/ledger/migrations" "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) var ( - flagExecutionStateDir string - flagOutputDir string - flagBlockHash string - flagStateCommitment string - flagDatadir string - flagChain string - flagNWorker int - flagNoMigration bool - flagMigration string - flagNoReport bool - flagValidateMigration bool - flagAllowPartialStateFromPayloads bool - flagSortPayloads bool - flagPrune bool - flagLogVerboseValidationError bool - flagDiffMigration bool - flagLogVerboseDiff bool - flagVerboseErrorOutput bool - flagStagedContractsFile string - flagContinueMigrationOnValidationError bool - flagCheckStorageHealthBeforeMigration bool - flagCheckStorageHealthAfterMigration bool - flagInputPayloadFileName string - flagOutputPayloadFileName string - flagOutputPayloadByAddresses string - flagMaxAccountSize uint64 - flagFixSlabsWithBrokenReferences bool - flagFilterUnreferencedSlabs bool - flagCPUProfile string - flagReportMetrics bool - flagCacheStaticTypeMigrationResults bool - flagCacheEntitlementsMigrationResults bool + flagExecutionStateDir string + flagOutputDir string + flagBlockHash string + flagStateCommitment string + flagDatadir string + flagChain string + flagNWorker int + flagNoMigration bool + flagMigration string + flagNoReport bool + flagAllowPartialStateFromPayloads bool + flagSortPayloads bool + flagPrune bool + flagInputPayloadFileName string + flagOutputPayloadFileName string + flagOutputPayloadByAddresses string + flagCPUProfile string + flagZeroMigration bool ) var Cmd = &cobra.Command{ @@ -91,6 +77,9 @@ func init() { Cmd.Flags().BoolVar(&flagNoMigration, "no-migration", false, "don't migrate the state") + Cmd.Flags().BoolVar(&flagZeroMigration, "estimate-migration-duration", false, + "run zero migrations to get minimum duration needed by migrations (load execution state, group payloads by account, iterate account payloads, create trie from payload, and generate checkpoint)") + Cmd.Flags().StringVar(&flagMigration, "migration", "", "migration name") Cmd.Flags().BoolVar(&flagNoReport, "no-report", false, @@ -98,36 +87,9 @@ func init() { Cmd.Flags().IntVar(&flagNWorker, "n-migrate-worker", 10, "number of workers to migrate payload concurrently") - Cmd.Flags().BoolVar(&flagValidateMigration, "validate", false, - "validate migrated Cadence values (atree migration)") - - Cmd.Flags().BoolVar(&flagLogVerboseValidationError, "log-verbose-validation-error", false, - "log entire Cadence values on validation error (atree migration)") - - Cmd.Flags().BoolVar(&flagDiffMigration, "diff", false, - "compare Cadence values and log diff (migration)") - - 
Cmd.Flags().BoolVar(&flagLogVerboseDiff, "log-verbose-diff", false, - "log entire Cadence values on diff (requires --diff flag)") - - Cmd.Flags().BoolVar(&flagVerboseErrorOutput, "verbose-error-output", true, - "log verbose output on migration errors") - - Cmd.Flags().StringVar(&flagStagedContractsFile, "staged-contracts", "", - "Staged contracts CSV file") - Cmd.Flags().BoolVar(&flagAllowPartialStateFromPayloads, "allow-partial-state-from-payload-file", false, "allow input payload file containing partial state (e.g. not all accounts)") - Cmd.Flags().BoolVar(&flagCheckStorageHealthBeforeMigration, "check-storage-health-before", false, - "check (atree) storage health before migration") - - Cmd.Flags().BoolVar(&flagCheckStorageHealthAfterMigration, "check-storage-health-after", false, - "check (atree) storage health after migration") - - Cmd.Flags().BoolVar(&flagContinueMigrationOnValidationError, "continue-migration-on-validation-errors", false, - "continue migration even if validation fails") - Cmd.Flags().BoolVar(&flagSortPayloads, "sort-payloads", true, "sort payloads (generate deterministic output; disable only for development purposes)") @@ -164,26 +126,8 @@ func init() { "extract payloads of addresses (comma separated hex-encoded addresses) to file specified by output-payload-filename", ) - Cmd.Flags().Uint64Var(&flagMaxAccountSize, "max-account-size", 0, - "max account size") - - Cmd.Flags().BoolVar(&flagFixSlabsWithBrokenReferences, "fix-testnet-slabs-with-broken-references", false, - "fix slabs with broken references in testnet") - - Cmd.Flags().BoolVar(&flagFilterUnreferencedSlabs, "filter-unreferenced-slabs", false, - "filter unreferenced slabs") - Cmd.Flags().StringVar(&flagCPUProfile, "cpu-profile", "", "enable CPU profiling") - - Cmd.Flags().BoolVar(&flagReportMetrics, "report-metrics", false, - "report migration metrics") - - Cmd.Flags().BoolVar(&flagCacheStaticTypeMigrationResults, "cache-static-type-migration", false, - "cache static type migration results") - - Cmd.Flags().BoolVar(&flagCacheEntitlementsMigrationResults, "cache-entitlements-migration", false, - "cache entitlements migration results") } func run(*cobra.Command, []string) { @@ -206,6 +150,11 @@ func run(*cobra.Command, []string) { log.Fatal().Err(err).Msgf("cannot create output directory %s", flagOutputDir) } + if flagNoMigration && flagZeroMigration { + log.Fatal().Msg("cannot run the command with both --no-migration and --estimate-migration-duration flags, one of them or none of them should be provided") + return + } + if len(flagBlockHash) > 0 && len(flagStateCommitment) > 0 { log.Fatal().Msg("cannot run the command with both block hash and state commitment as inputs, only one of them should be provided") return @@ -224,10 +173,6 @@ func run(*cobra.Command, []string) { log.Fatal().Msg("--extract-payloads-by-address requires --output-payload-filename to be specified") } - if flagValidateMigration && flagDiffMigration { - log.Fatal().Msg("Both --validate and --diff are enabled, please specify only one (or none) of these") - } - var stateCommitment flow.StateCommitment if len(flagBlockHash) > 0 { @@ -238,11 +183,14 @@ func run(*cobra.Command, []string) { log.Info().Msgf("extracting state by block ID: %v", blockID) - db := common.InitStorage(flagDatadir) + db, err := common.InitStorage(flagDatadir) + if err != nil { + log.Fatal().Err(err).Msgf("cannot initialize storage with datadir %s", flagDatadir) + } defer db.Close() cache := &metrics.NoopCollector{} - commits := store.NewCommits(cache, 
badgerimpl.ToDB(db)) + commits := store.NewCommits(cache, db) stateCommitment, err = commits.ByBlockID(blockID) if err != nil { @@ -316,34 +264,6 @@ func run(*cobra.Command, []string) { log.Warn().Msgf("--no-report flag is deprecated") } - if flagValidateMigration { - log.Warn().Msgf("--validate flag is enabled and will increase duration of migration") - } - - if flagLogVerboseValidationError { - log.Warn().Msgf("--log-verbose-validation-error flag is enabled which may increase size of log") - } - - if flagDiffMigration { - log.Warn().Msgf("--diff flag is enabled and will increase duration of migration") - } - - if flagLogVerboseDiff { - log.Warn().Msgf("--log-verbose-diff flag is enabled which may increase size of log") - } - - if flagVerboseErrorOutput { - log.Warn().Msgf("--verbose-error-output flag is enabled which may increase size of log") - } - - if flagCheckStorageHealthBeforeMigration { - log.Warn().Msgf("--check-storage-health-before flag is enabled and will increase duration of migration") - } - - if flagCheckStorageHealthAfterMigration { - log.Warn().Msgf("--check-storage-health-after flag is enabled and will increase duration of migration") - } - var inputMsg string if len(flagInputPayloadFileName) > 0 { // Input is payloads @@ -403,6 +323,21 @@ func run(*cobra.Command, []string) { return } + if flagZeroMigration { + newStateCommitment, err := emptyMigration( + log.Logger, + flagExecutionStateDir, + flagOutputDir, + stateCommitment) + if err != nil { + log.Fatal().Err(err).Msgf("error extracting state for commitment %s", stateCommitment) + } + if stateCommitment != flow.StateCommitment(newStateCommitment) { + log.Fatal().Err(err).Msgf("empty migration failed: state commitments are different: %v != %s", stateCommitment, newStateCommitment) + } + return + } + var extractor extractor if len(flagInputPayloadFileName) > 0 { extractor = newPayloadFileExtractor(log.Logger, flagInputPayloadFileName) @@ -506,6 +441,66 @@ func extractStateToCheckpointWithoutMigration( return createCheckpoint(logger, newTrie, outputDir, bootstrap.FilenameWALRootCheckpoint) } +func emptyMigration( + logger zerolog.Logger, + executionStateDir string, + outputDir string, + stateCommitment flow.StateCommitment, +) (ledger.State, error) { + + log.Info().Msgf("Loading state with commitment %s", stateCommitment) + + // Load state for given state commitment + trie, err := util.ReadTrie(executionStateDir, stateCommitment) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to load state: %w", err) + } + + log.Info().Msgf("Getting payloads from loaded state") + + // Get payloads from trie. 
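+ // The payloads are then run through a single no-op migration and a new trie
+ // and checkpoint are built from them, so the elapsed time approximates the
+ // fixed overhead of any real migration run (loading state, grouping payloads
+ // by account, rebuilding the trie, and generating the checkpoint).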
+ payloads := trie.AllPayloads() + + log.Info().Msgf("Migrating %d payloads", len(payloads)) + + // Migrate payloads (migration is no-op) + migs := []migrations.NamedMigration{ + { + Name: "empty migration", + Migrate: func(*registers.ByAccount) error { + return nil + }, + }, + } + + migration := newMigration(log.Logger, migs, flagNWorker) + + migratedPayloads, err := migration(payloads) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to migrate payloads: %w", err) + } + + log.Info().Msgf("Migrated %d payloads", len(migratedPayloads)) + + // Create trie from migrated payloads + migratedTrie, err := createTrieFromPayloads(log.Logger, payloads) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to create new trie from migrated payloads: %w", err) + } + + log.Info().Msgf("Created trie from migrated payloads with commitment %s", migratedTrie.RootHash()) + + // Create checkpoint files + newState, err := createCheckpoint(logger, migratedTrie, outputDir, bootstrap.FilenameWALRootCheckpoint) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to create checkpoint: %w", err) + } + + log.Info().Msgf("Created checkpoint") + + return newState, nil +} + func ensureCheckpointFileExist(dir string) error { checkpoints, err := wal.Checkpoints(dir) if err != nil { diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go index 3650385920d..c1b862b2518 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go @@ -14,7 +14,6 @@ import ( runtimeCommon "github.com/onflow/cadence/common" - "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" @@ -22,7 +21,9 @@ import ( "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -36,10 +37,14 @@ func TestExtractExecutionState(t *testing.T) { metr := &metrics.NoopCollector{} t.Run("missing block->state commitment mapping", func(t *testing.T) { - withDirs(t, func(datadir, execdir, outdir string) { - db := common.InitStorage(datadir) - commits := store.NewCommits(metr, badgerimpl.ToDB(db)) + // Initialize a proper Badger database instead of using empty directory + db := unittest.PebbleDB(t, datadir) + defer db.Close() + + // Convert to storage.DB interface + storageDB := pebbleimpl.ToDB(db) + commits := store.NewCommits(metr, storageDB) _, err := commits.ByBlockID(unittest.IdentifierFixture()) require.Error(t, err) @@ -47,16 +52,27 @@ func TestExtractExecutionState(t *testing.T) { }) t.Run("retrieves block->state mapping", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() withDirs(t, func(datadir, execdir, outdir string) { - db := common.InitStorage(datadir) - commits := store.NewCommits(metr, badgerimpl.ToDB(db)) + // Initialize a proper Badger database instead of using empty directory + db := unittest.PebbleDB(t, datadir) + defer db.Close() + + // Convert to storage.DB interface + storageDB := pebbleimpl.ToDB(db) + commits := 
store.NewCommits(metr, storageDB) blockID := unittest.IdentifierFixture() stateCommitment := unittest.StateCommitmentFixture() - err := commits.Store(blockID, stateCommitment) - require.NoError(t, err) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + require.NoError(t, storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // Store the state commitment for the block ID + return operation.IndexStateCommitment(lctx, rw, blockID, stateCommitment) + })) + lctx.Release() retrievedStateCommitment, err := commits.ByBlockID(blockID) require.NoError(t, err) @@ -76,16 +92,20 @@ func TestExtractExecutionState(t *testing.T) { }) t.Run("happy path", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() withDirs(t, func(datadir, execdir, _ string) { - const ( checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. checkpointsToKeep = 1 ) - db := common.InitStorage(datadir) - commits := store.NewCommits(metr, badgerimpl.ToDB(db)) + // Initialize a proper Badger database instead of using empty directory + db := unittest.PebbleDB(t, datadir) + defer db.Close() + + // Convert to storage.DB interface + storageDB := pebbleimpl.ToDB(db) // generate some oldLedger data size := 10 @@ -117,8 +137,13 @@ func TestExtractExecutionState(t *testing.T) { // generate random block and map it to state commitment blockID := unittest.IdentifierFixture() - err = commits.Store(blockID, flow.StateCommitment(stateCommitment)) - require.NoError(t, err) + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + require.NoError(t, storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexStateCommitment(lctx, rw, blockID, flow.StateCommitment(stateCommitment)) + })) + lctx.Release() data := make(map[string]keyPair, len(keys)) for j, key := range keys { @@ -127,85 +152,40 @@ func TestExtractExecutionState(t *testing.T) { value: values[j], } } - - keysValuesByCommit[string(stateCommitment[:])] = data + keysValuesByCommit[stateCommitment.String()] = data commitsByBlocks[blockID] = stateCommitment blocksInOrder[i] = blockID } + // wait for the ledger and compactor to finish <-f.Done() <-compactor.Done() - err = db.Close() - require.NoError(t, err) - - // for blockID, stateCommitment := range commitsByBlocks { - - for i, blockID := range blocksInOrder { - - stateCommitment := commitsByBlocks[blockID] - - // we need fresh output dir to prevent contamination - unittest.RunWithTempDir(t, func(outdir string) { - - Cmd.SetArgs([]string{ - "--execution-state-dir", execdir, - "--output-dir", outdir, - "--state-commitment", stateCommitment.String(), - "--datadir", datadir, - "--no-migration", - "--no-report", - "--chain", flow.Emulator.Chain().String()}) - - err := Cmd.Execute() - require.NoError(t, err) + // extract the execution state + extractor := newExecutionStateExtractor(zerolog.Nop(), execdir, flow.StateCommitment(stateCommitment)) - diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), outdir, size, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - - storage, err := complete.NewLedger(diskWal, 1000, metr, zerolog.Nop(), complete.DefaultPathFinderVersion) - require.NoError(t, err) - - const ( - checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
- checkpointsToKeep = 1 - ) - compactor, err := complete.NewCompactor(storage, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{}) - require.NoError(t, err) - - <-compactor.Ready() - - data := keysValuesByCommit[string(stateCommitment[:])] - - keys := make([]ledger.Key, 0, len(data)) - for _, v := range data { - keys = append(keys, v.key) - } - - query, err := ledger.NewQuery(stateCommitment, keys) - require.NoError(t, err) - - registerValues, err := storage.Get(query) - // registerValues, err := mForest.Read([]byte(stateCommitment), keys) - require.NoError(t, err) - - for i, key := range keys { - registerValue := registerValues[i] - require.Equal(t, data[key.String()].value, registerValue) - } + partialState, payloads, err := extractor.extract() + require.NoError(t, err) + require.False(t, partialState) + // Calculate expected number of payloads based on getSampleKeyValues logic + expectedPayloads := 2 + 4 + 2 + (7 * 10) // cases 0, 1, 2, and 7 default cases + require.Equal(t, expectedPayloads, len(payloads)) - // make sure blocks after this one are not in checkpoint - // ie - extraction stops after hitting right hash - for j := i + 1; j < len(blocksInOrder); j++ { + // verify the payloads + for _, payload := range payloads { + key, err := payload.Key() + require.NoError(t, err) - query.SetState(commitsByBlocks[blocksInOrder[j]]) - _, err := storage.Get(query) - require.Error(t, err) + // Look for the key in all state commitments + found := false + for _, commitData := range keysValuesByCommit { + if kv, exist := commitData[key.String()]; exist { + require.Equal(t, kv.value, payload.Value()) + found = true + break } - - <-storage.Done() - <-compactor.Done() - }) + } + require.True(t, found, "key %s not found in any state commitment", key.String()) } }) }) diff --git a/cmd/util/cmd/export-json-transactions/cmd.go b/cmd/util/cmd/export-json-transactions/cmd.go index 636c21754fa..d4f986026fc 100644 --- a/cmd/util/cmd/export-json-transactions/cmd.go +++ b/cmd/util/cmd/export-json-transactions/cmd.go @@ -8,11 +8,13 @@ import ( "os" "path/filepath" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions/transactions" + "github.com/onflow/flow-go/storage" ) var flagDatadir string @@ -47,7 +49,8 @@ func init() { func run(*cobra.Command, []string) { log.Info().Msg("start exporting transactions") - err := ExportTransactions(flagDatadir, flagOutputDir, flagStartHeight, flagEndHeight) + lockManager := storage.MakeSingletonLockManager() + err := ExportTransactions(lockManager, flagDatadir, flagOutputDir, flagStartHeight, flagEndHeight) if err != nil { log.Fatal().Err(err).Msg("cannot get export transactions") } @@ -61,16 +64,19 @@ func writeJSONTo(writer io.Writer, jsonData []byte) error { // ExportTransactions exports transactions to JSON to the outputDir for height range specified by // startHeight and endHeight -func ExportTransactions(dataDir string, outputDir string, startHeight uint64, endHeight uint64) error { +func ExportTransactions(lockManager lockctx.Manager, dataDir string, outputDir string, startHeight uint64, endHeight uint64) error { // init dependencies - db := common.InitStorage(flagDatadir) + db, err := common.InitStorage(flagDatadir) + if err != nil { + return fmt.Errorf("could not initialize storage: %w", err) + } storages := common.InitStorages(db) defer db.Close() - 
state, err := common.InitProtocolState(db, storages) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - return fmt.Errorf("could not init protocol state: %w", err) + return fmt.Errorf("could not open protocol state: %w", err) } // create finder diff --git a/cmd/util/cmd/export-json-transactions/transactions/range_test.go b/cmd/util/cmd/export-json-transactions/transactions/range_test.go index f8bc27b177d..8af254ea8ad 100644 --- a/cmd/util/cmd/export-json-transactions/transactions/range_test.go +++ b/cmd/util/cmd/export-json-transactions/transactions/range_test.go @@ -4,18 +4,19 @@ import ( "fmt" "testing" - badger "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/utils/unittest" ) func TestFindBlockTransactions(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() // prepare two blocks // block 1 has 2 collections // block 2 has 1 collection @@ -59,12 +60,24 @@ func TestFindBlockTransactions(t *testing.T) { state.On("AtHeight", uint64(5)).Return(snap5, nil) // store into database - require.NoError(t, payloads.Store(b1.ID(), b1.Payload)) - require.NoError(t, payloads.Store(b2.ID(), b2.Payload)) - - require.NoError(t, collections.Store(&col1.Collection)) - require.NoError(t, collections.Store(&col2.Collection)) - require.NoError(t, collections.Store(&col3.Collection)) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + defer lctx.Release() + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := storages.Blocks.BatchStore(lctx, rw, &b1) + if err != nil { + return err + } + + return storages.Blocks.BatchStore(lctx, rw, &b2) + })) + + _, err := collections.Store(&col1.Collection) + require.NoError(t, err) + _, err = collections.Store(&col2.Collection) + require.NoError(t, err) + _, err = collections.Store(&col3.Collection) + require.NoError(t, err) f := &Finder{ State: state, diff --git a/cmd/util/cmd/find-inconsistent-result/cmd.go b/cmd/util/cmd/find-inconsistent-result/cmd.go index b1bd19804ee..3e62fcad371 100644 --- a/cmd/util/cmd/find-inconsistent-result/cmd.go +++ b/cmd/util/cmd/find-inconsistent-result/cmd.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" @@ -12,7 +12,6 @@ import ( "github.com/onflow/flow-go/module/block_iterator/latest" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/operation/badgerimpl" ) var NoMissmatchFoundError = errors.New("No missmatch found") @@ -38,7 +37,8 @@ func init() { } func run(*cobra.Command, []string) { - err := findFirstMismatch(flagDatadir, flagStartHeight, flagEndHeight) + lockManager := storage.MakeSingletonLockManager() + err := findFirstMismatch(flagDatadir, flagStartHeight, flagEndHeight, lockManager) if err != nil { if errors.Is(err, NoMissmatchFoundError) { fmt.Printf("no mismatch found: %v\n", err) @@ -48,9 +48,9 @@ func run(*cobra.Command, []string) { } } -func findFirstMismatch(datadir string, startHeight, endHeight uint64) error 
{ +func findFirstMismatch(datadir string, startHeight, endHeight uint64, lockManager lockctx.Manager) error { fmt.Printf("initializing database\n") - headers, results, seals, state, db, err := createStorages(datadir) + headers, results, seals, state, db, err := createStorages(datadir, lockManager) defer db.Close() if err != nil { return fmt.Errorf("could not create storages: %v", err) @@ -68,7 +68,7 @@ func findFirstMismatch(datadir string, startHeight, endHeight uint64) error { } if endHeight == 0 { - endHeight, err = latest.LatestSealedAndExecutedHeight(state, badgerimpl.ToDB(db)) + endHeight, err = latest.LatestSealedAndExecutedHeight(state, db) if err != nil { return fmt.Errorf("could not find last executed and sealed height: %v", err) } @@ -93,14 +93,17 @@ func findFirstMismatch(datadir string, startHeight, endHeight uint64) error { return nil } -func createStorages(dir string) ( - storage.Headers, storage.ExecutionResults, storage.Seals, protocol.State, *badger.DB, error) { - db := common.InitStorage(dir) +func createStorages(dir string, lockManager lockctx.Manager) ( + storage.Headers, storage.ExecutionResults, storage.Seals, protocol.State, storage.DB, error) { + db, err := common.InitStorage(dir) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not initialize storage: %v", err) + } storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - return nil, nil, nil, nil, db, fmt.Errorf("could not init protocol state: %v", err) + return nil, nil, nil, nil, db, fmt.Errorf("could not open protocol state: %v", err) } return storages.Headers, storages.Results, storages.Seals, state, db, err diff --git a/cmd/util/cmd/pebble-checkpoint/cmd.go b/cmd/util/cmd/pebble-checkpoint/cmd.go new file mode 100644 index 00000000000..247caa2612b --- /dev/null +++ b/cmd/util/cmd/pebble-checkpoint/cmd.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "fmt" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/storage/pebble" +) + +var ( + flagPebbleDir string + flagOutput string +) + +// Note: Although checkpoint is fast to create, it is not free. When creating a checkpoint, the +// underlying pebble sstables are hard-linked to the checkpoint directory, which means the compaction +// process will not be able to delete the sstables until the checkpoint is deleted. This can lead to +// increased disk usage if checkpoints are created frequently without being cleaned up. 
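+// Checkpoints should therefore be deleted once they are no longer needed, so
+// that the hard-linked sstables can be reclaimed.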
+var Cmd = &cobra.Command{ + Use: "pebble-checkpoint", + Short: "Create a checkpoint from a Pebble database", + RunE: runE, +} + +func init() { + Cmd.Flags().StringVar(&flagPebbleDir, "pebbledir", "", + "directory containing the Pebble database") + _ = Cmd.MarkFlagRequired("pebbledir") + + Cmd.Flags().StringVar(&flagOutput, "output", "", + "output directory for the checkpoint") + _ = Cmd.MarkFlagRequired("output") +} + +func runE(*cobra.Command, []string) error { + log.Info().Msgf("creating checkpoint from Pebble database at %v to %v", flagPebbleDir, flagOutput) + + // Initialize Pebble DB + db, err := pebble.ShouldOpenDefaultPebbleDB(log.Logger, flagPebbleDir) + if err != nil { + return fmt.Errorf("failed to initialize Pebble database %v: %w", flagPebbleDir, err) + } + + // Create checkpoint + err = db.Checkpoint(flagOutput) + if err != nil { + return fmt.Errorf("failed to create checkpoint %v: %w", flagOutput, err) + } + + log.Info().Msgf("successfully created checkpoint at %v", flagOutput) + return nil +} diff --git a/cmd/util/cmd/read-badger/cmd/blocks.go b/cmd/util/cmd/read-badger/cmd/blocks.go index 5b04d34a965..fd2b4ce625e 100644 --- a/cmd/util/cmd/read-badger/cmd/blocks.go +++ b/cmd/util/cmd/read-badger/cmd/blocks.go @@ -1,43 +1,72 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagBlockID string +var flagBlockHeight uint64 func init() { rootCmd.AddCommand(blocksCmd) blocksCmd.Flags().StringVarP(&flagBlockID, "id", "i", "", "the id of the block") - _ = blocksCmd.MarkFlagRequired("id") + blocksCmd.Flags().Uint64Var(&flagBlockHeight, "height", 0, "Block height") } var blocksCmd = &cobra.Command{ Use: "blocks", - Short: "get a block by block ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } - - log.Info().Msgf("getting block by id: %v", blockID) - block, err := storages.Blocks.ByID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get block with id: %v", blockID) - return - } - - common.PrettyPrintEntity(block) + Short: "get a block by block ID or height", + RunE: func(cmd *cobra.Command, args []string) error { + return WithStorage(func(db storage.DB) error { + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) + index := store.NewIndex(cacheMetrics, db) + guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize) + seals := store.NewSeals(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + + var block *flow.Block + var err error + + if flagBlockID != "" { + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting block by id: %v", blockID) + + block, err = blocks.ByID(blockID) + if err != nil { + return 
fmt.Errorf("could not get block with id %v: %w", blockID, err) + } + } else if flagBlockHeight != 0 { + log.Info().Msgf("got flag block height: %d", flagBlockHeight) + + block, err = blocks.ByHeight(flagBlockHeight) + if err != nil { + return fmt.Errorf("could not get block with height %d: %w", flagBlockHeight, err) + } + } else { + return fmt.Errorf("provide either a --id or --height and not both, (--block-id: %v), (--height: %v)", flagBlockID, flagBlockHeight) + } + + common.PrettyPrintEntity(block) + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go b/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go index 7c9dd7652d4..e4d0bd2b7c5 100644 --- a/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go +++ b/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go @@ -3,7 +3,7 @@ package cmd import ( "fmt" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - badgerstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/storage/store" ) @@ -37,7 +37,8 @@ var chunkDataPackCmd = &cobra.Command{ } metrics := metrics.NewNoopCollector() - collections := badgerstorage.NewCollections(bdb, badgerstorage.NewTransactions(metrics, bdb)) + db := badgerimpl.ToDB(bdb) + collections := store.NewCollections(db, store.NewTransactions(metrics, db)) chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, 1) diff --git a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go index 4d3df3f363d..6de942e66b0 100644 --- a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go +++ b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go @@ -1,13 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagChainName string @@ -27,60 +30,52 @@ func init() { var clusterBlocksCmd = &cobra.Command{ Use: "cluster-blocks", Short: "get cluster blocks", - Run: func(cmd *cobra.Command, args []string) { - metrics := metrics.NewNoopCollector() - flagDBs := common.ReadDBFlags() - db, err := common.InitBadgerStorage(flagDBs) - if err != nil { - log.Fatal().Err(err).Msg("could not init badger db") - } - defer db.Close() - - headers := badger.NewHeaders(metrics, db) - clusterPayloads := badger.NewClusterPayloads(metrics, db) - - // get chain id - log.Info().Msgf("got flag chain name: %s", flagChainName) - chainID := flow.ChainID(flagChainName) - clusterBlocks := badger.NewClusterBlocks(db, chainID, headers, clusterPayloads) - - if flagClusterBlockID != "" && flagHeight != 0 { - log.Error().Msg("provide either a --id or --height and not both") - return - } - - if flagClusterBlockID != "" { - log.Info().Msgf("got flag cluster block id: %s", flagClusterBlockID) - clusterBlockID, err := flow.HexStringToIdentifier(flagClusterBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed cluster block id") - return + RunE: func(cmd *cobra.Command, args []string) error { + return 
WithStorage(func(db storage.DB) error { + metrics := metrics.NewNoopCollector() + + headers := store.NewHeaders(metrics, db) + clusterPayloads := store.NewClusterPayloads(metrics, db) + + // get chain id + log.Info().Msgf("got flag chain name: %s", flagChainName) + chainID := flow.ChainID(flagChainName) + clusterBlocks := store.NewClusterBlocks(db, chainID, headers, clusterPayloads) + + if flagClusterBlockID != "" && flagHeight != 0 { + return fmt.Errorf("provide either a --id or --height and not both") } - log.Info().Msgf("getting cluster block by id: %v", clusterBlockID) - clusterBlock, err := clusterBlocks.ByID(clusterBlockID) - if err != nil { - log.Error().Err(err).Msgf("could not get cluster block with id: %v", clusterBlockID) - return + if flagClusterBlockID != "" { + log.Info().Msgf("got flag cluster block id: %s", flagClusterBlockID) + clusterBlockID, err := flow.HexStringToIdentifier(flagClusterBlockID) + if err != nil { + return fmt.Errorf("malformed cluster block id: %w", err) + } + + log.Info().Msgf("getting cluster block by id: %v", clusterBlockID) + clusterBlock, err := clusterBlocks.ByID(clusterBlockID) + if err != nil { + return fmt.Errorf("could not get cluster block with id: %v, %w", clusterBlockID, err) + } + + common.PrettyPrint(clusterBlock) + return nil } - common.PrettyPrint(clusterBlock) - return - } + if flagHeight > 0 { + log.Info().Msgf("getting cluster block by height: %v", flagHeight) + clusterBlock, err := clusterBlocks.ByHeight(flagHeight) + if err != nil { + return fmt.Errorf("could not get cluster block with height: %v, %w", flagHeight, err) + } - if flagClusterBlockID != "" { - log.Info().Msgf("getting cluster block by height: %v", flagHeight) - clusterBlock, err := clusterBlocks.ByHeight(flagHeight) - if err != nil { - log.Error().Err(err).Msgf("could not get cluster block with height: %v", flagHeight) - return + log.Info().Msgf("block id: %v", clusterBlock.ID()) + common.PrettyPrint(clusterBlock) + return nil } - log.Info().Msgf("block id: %v", clusterBlock.ID()) - common.PrettyPrint(clusterBlock) - return - } - - log.Error().Msg("provide either a --id or --height") + return fmt.Errorf("provide either a --id or --height") + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/commits.go b/cmd/util/cmd/read-badger/cmd/commits.go index c64ad77e4e6..8bee88c05d7 100644 --- a/cmd/util/cmd/read-badger/cmd/commits.go +++ b/cmd/util/cmd/read-badger/cmd/commits.go @@ -42,7 +42,7 @@ var commitsCmd = &cobra.Command{ return fmt.Errorf("could not get commit for block id: %v: %w", blockID, err) } - log.Info().Msgf("commit: %x", commit) + log.Info().Msgf("commit: %v", commit) return nil }) diff --git a/cmd/util/cmd/read-badger/cmd/storages.go b/cmd/util/cmd/read-badger/cmd/storages.go index 5fcb47a5493..d245aa99ee8 100644 --- a/cmd/util/cmd/read-badger/cmd/storages.go +++ b/cmd/util/cmd/read-badger/cmd/storages.go @@ -1,7 +1,7 @@ package cmd import ( - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog/log" @@ -17,8 +17,8 @@ func InitStorages() (*storage.All, *badger.DB) { log.Fatal().Err(err).Msg("could not parse db flag") } - db := common.InitStorage(usedDir.DBDir) - storages := common.InitStorages(db) + db := common.InitBadgerDBStorage(usedDir.DBDir) + storages := common.InitBadgerStorages(db) return storages, db } diff --git a/cmd/util/cmd/read-badger/cmd/transaction_results.go b/cmd/util/cmd/read-badger/cmd/transaction_results.go index 47f3fbee1f3..b879a573365 100644 --- 
a/cmd/util/cmd/read-badger/cmd/transaction_results.go +++ b/cmd/util/cmd/read-badger/cmd/transaction_results.go @@ -3,15 +3,13 @@ package cmd import ( "fmt" - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -25,10 +23,10 @@ func init() { var transactionResultsCmd = &cobra.Command{ Use: "transaction-results", Short: "get transaction-result by block ID", - Run: func(cmd *cobra.Command, args []string) { - err := WithBadgerAndPebble(func(bdb *badger.DB, pdb *pebble.DB) error { - transactionResults := store.NewTransactionResults(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb), 1) - storages := common.InitStorages(bdb) + RunE: func(cmd *cobra.Command, args []string) error { + return WithStorage(func(db storage.DB) error { + transactionResults := store.NewTransactionResults(metrics.NewNoopCollector(), db, 1) + storages := common.InitStorages(db) log.Info().Msgf("got flag block id: %s", flagBlockID) blockID, err := flow.HexStringToIdentifier(flagBlockID) if err != nil { @@ -65,9 +63,5 @@ var transactionResultsCmd = &cobra.Command{ return nil }) - - if err != nil { - log.Error().Err(err).Msg("could not get transaction results") - } }, } diff --git a/cmd/util/cmd/read-light-block/read_light_block_test.go b/cmd/util/cmd/read-light-block/read_light_block_test.go index 78a84d60823..f7b3be1b4d4 100644 --- a/cmd/util/cmd/read-light-block/read_light_block_test.go +++ b/cmd/util/cmd/read-light-block/read_light_block_test.go @@ -3,43 +3,57 @@ package read import ( "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) func TestReadClusterRange(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() chain := unittest.ClusterBlockChainFixture(5) parent, blocks := chain[0], chain[1:] + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) // add parent as boundary - err := db.Update(operation.IndexClusterBlockHeight(parent.Header.ChainID, parent.Header.Height, parent.ID())) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), parent.Header.ChainID, parent.Header.Height, parent.ID()) + }) require.NoError(t, err) - err = db.Update(operation.InsertClusterFinalizedHeight(parent.Header.ChainID, parent.Header.Height)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), parent.Header.ChainID, parent.Header.Height) + }) require.NoError(t, err) // add blocks for _, block := range blocks { - err := 
db.Update(procedure.InsertClusterBlock(&block)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &block) + }) require.NoError(t, err) - err = db.Update(procedure.FinalizeClusterBlock(block.Header.ID())) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.FinalizeClusterBlock(lctx, rw, block.Header.ID()) + }) require.NoError(t, err) } - clusterBlocks := badgerstorage.NewClusterBlocks( + lctx.Release() + + clusterBlocks := store.NewClusterBlocks( db, blocks[0].Header.ChainID, - badgerstorage.NewHeaders(metrics.NewNoopCollector(), db), - badgerstorage.NewClusterPayloads(metrics.NewNoopCollector(), db), + store.NewHeaders(metrics.NewNoopCollector(), db), + store.NewClusterPayloads(metrics.NewNoopCollector(), db), ) startHeight := blocks[0].Header.Height diff --git a/cmd/util/cmd/read-protocol-state/cmd/blocks.go b/cmd/util/cmd/read-protocol-state/cmd/blocks.go index 838110de501..148cfdc7a74 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/blocks.go +++ b/cmd/util/cmd/read-protocol-state/cmd/blocks.go @@ -24,7 +24,7 @@ var ( var Cmd = &cobra.Command{ Use: "blocks", Short: "Read block from protocol state", - Run: run, + RunE: runE, } func init() { @@ -147,109 +147,105 @@ func (r *Reader) IsExecuted(blockID flow.Identifier) (bool, error) { return false, err } -func run(*cobra.Command, []string) { +func runE(*cobra.Command, []string) error { + lockManager := storage.MakeSingletonLockManager() flagDBs := common.ReadDBFlags() - db, err := common.InitBadgerStorage(flagDBs) - if err != nil { - log.Fatal().Err(err).Msg("could not init badger db") - } - defer db.Close() - - storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) - en := common.InitExecutionStorages(db) + return common.WithStorage(flagDBs, func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) - if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") - } - - reader := NewReader(state, storages.Blocks, en.Commits) - - // making sure only one flag is being used - err = checkOnlyOneFlagIsUsed(flagHeight, flagBlockID, flagFinal, flagSealed, flagExecuted) - if err != nil { - log.Fatal().Err(err).Msg("could not get block") - } - - if flagHeight > 0 { - log.Info().Msgf("get block by height: %v", flagHeight) - block, err := reader.GetBlockByHeight(flagHeight) if err != nil { - log.Fatal().Err(err).Msg("could not get block by height") + log.Fatal().Err(err).Msg("could not init protocol state") } - common.PrettyPrintEntity(block) - return - } + reader := NewReader(state, storages.Blocks, storages.Commits) - if flagBlockID != "" { - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Fatal().Err(err).Msgf("malformed block ID: %v", flagBlockID) - } - log.Info().Msgf("get block by ID: %v", blockID) - block, err := reader.GetBlockByID(blockID) + // making sure only one flag is being used + err = checkOnlyOneFlagIsUsed(flagHeight, flagBlockID, flagFinal, flagSealed, flagExecuted) if err != nil { - log.Fatal().Err(err).Msg("could not get block by ID") + return fmt.Errorf("could not get block: %w", err) } - common.PrettyPrintEntity(block) - return - } - if flagFinal { - log.Info().Msgf("get last finalized block") - block, err := reader.GetFinal() - if err != nil { - log.Fatal().Err(err).Msg("could not get finalized block") + if flagHeight > 0 { + log.Info().Msgf("get block by 
height: %v", flagHeight) + block, err := reader.GetBlockByHeight(flagHeight) + if err != nil { + log.Fatal().Err(err).Msg("could not get block by height") + } + + common.PrettyPrintEntity(block) + return nil } - common.PrettyPrintEntity(block) - return - } - if flagSealed { - log.Info().Msgf("get last sealed block") - block, err := reader.GetSealed() - if err != nil { - log.Fatal().Err(err).Msg("could not get sealed block") + if flagBlockID != "" { + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block ID: %v: %w", flagBlockID, err) + } + log.Info().Msgf("get block by ID: %v", blockID) + block, err := reader.GetBlockByID(blockID) + if err != nil { + return fmt.Errorf("could not get block by ID: %w", err) + } + common.PrettyPrintEntity(block) + return nil } - common.PrettyPrintEntity(block) - return - } - if flagExecuted { - log.Info().Msgf("get last executed and sealed block") - sealed, err := reader.GetSealed() - if err != nil { - log.Fatal().Err(err).Msg("could not get sealed block") + if flagFinal { + log.Info().Msgf("get last finalized block") + block, err := reader.GetFinal() + if err != nil { + return fmt.Errorf("could not get finalized block: %w", err) + } + common.PrettyPrintEntity(block) + return nil } - root, err := reader.GetRoot() - if err != nil { - log.Fatal().Err(err).Msg("could not get root block") + if flagSealed { + log.Info().Msgf("get last sealed block") + block, err := reader.GetSealed() + if err != nil { + return fmt.Errorf("could not get sealed block: %w", err) + } + common.PrettyPrintEntity(block) + return nil } - // find the last executed and sealed block - for h := sealed.Header.Height; h >= root.Header.Height; h-- { - block, err := reader.GetBlockByHeight(h) + if flagExecuted { + log.Info().Msgf("get last executed and sealed block") + sealed, err := reader.GetSealed() if err != nil { - log.Fatal().Err(err).Msgf("could not get block by height: %v", h) + return fmt.Errorf("could not get sealed block: %w", err) } - executed, err := reader.IsExecuted(block.ID()) + root, err := reader.GetRoot() if err != nil { - log.Fatal().Err(err).Msgf("could not check block executed or not: %v", h) + return fmt.Errorf("could not get root block: %w", err) } - if executed { - common.PrettyPrintEntity(block) - return + // find the last executed and sealed block + for h := sealed.Header.Height; h >= root.Header.Height; h-- { + block, err := reader.GetBlockByHeight(h) + if err != nil { + return fmt.Errorf("could not get block by height: %v: %w", h, err) + } + + executed, err := reader.IsExecuted(block.ID()) + if err != nil { + log.Fatal().Err(err).Msgf("could not check block executed or not: %v", h) + } + + if executed { + common.PrettyPrintEntity(block) + return nil + } } - } - log.Fatal().Msg("could not find executed block") - } + return fmt.Errorf("could not find executed block") + } - log.Fatal().Msgf("missing flag, try --final or --sealed or --height or --executed or --block-id, note that only one flag can be used at a time") + return fmt.Errorf("missing flag, try --final or --sealed or --height or --executed or --block-id, note that only one flag can be used at a time") + }) } func checkOnlyOneFlagIsUsed(height uint64, blockID string, final, sealed, executed bool) error { diff --git a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go index 162c7dbacd7..75e95caf191 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go +++ 
b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go @@ -1,6 +1,8 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -9,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/storage" ) var flagCheckpointDir string @@ -18,7 +21,7 @@ var flagCheckpointScanEndHeight int64 var SnapshotCmd = &cobra.Command{ Use: "snapshot", Short: "Read snapshot from protocol state", - Run: runSnapshot, + RunE: runSnapshotE, } func init() { @@ -46,83 +49,79 @@ func init() { "(execution node only) scan end height for finding sealed height by checkpoint (use with --checkpoint-dir flag)") } -func runSnapshot(*cobra.Command, []string) { - flagDBs := common.ReadDBFlags() - db, err := common.InitBadgerStorage(flagDBs) - if err != nil { - log.Fatal().Err(err).Msg("could not init badger db") - } - defer db.Close() - - storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) - if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") - } - - var snapshot protocol.Snapshot - - if flagHeight > 0 { - log.Info().Msgf("get snapshot by height: %v", flagHeight) - snapshot = state.AtHeight(flagHeight) - } else if flagBlockID != "" { - log.Info().Msgf("get snapshot by block ID: %v", flagBlockID) - blockID := flow.MustHexStringToIdentifier(flagBlockID) - snapshot = state.AtBlockID(blockID) - } else if flagFinal { - log.Info().Msgf("get last finalized snapshot") - snapshot = state.Final() - } else if flagSealed { - log.Info().Msgf("get last sealed snapshot") - snapshot = state.Sealed() - } else if flagCheckpointDir != "" { - log.Info().Msgf("get snapshot for latest checkpoint in directory %v (step: %v, endHeight: %v)", - flagCheckpointDir, flagCheckpointScanStep, flagCheckpointScanEndHeight) - var protocolSnapshot protocol.Snapshot - var sealedHeight uint64 - var sealedCommit flow.StateCommitment - var checkpointFile string - if flagCheckpointScanEndHeight < 0 { - // using default end height which is the last sealed height - protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( - log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep) - } else { - // using customized end height - protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( - log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep, uint64(flagCheckpointScanEndHeight)) - } - +func runSnapshotE(*cobra.Command, []string) error { + lockManager := storage.MakeSingletonLockManager() + return common.WithStorage(common.ReadDBFlags(), func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - log.Fatal().Err(err).Msgf("could not generate protocol snapshot for checkpoint in dir: %v", flagCheckpointDir) + return fmt.Errorf("could not init protocol state") } - snapshot = protocolSnapshot - log.Info().Msgf("snapshot found for checkpoint file %v, sealed height %v, commit %x", checkpointFile, sealedHeight, sealedCommit) - } + var snapshot protocol.Snapshot + + if flagHeight > 0 { + log.Info().Msgf("get snapshot by height: %v", flagHeight) + snapshot = state.AtHeight(flagHeight) + } else if flagBlockID != "" { + log.Info().Msgf("get snapshot by block ID: %v", 
flagBlockID) + blockID := flow.MustHexStringToIdentifier(flagBlockID) + snapshot = state.AtBlockID(blockID) + } else if flagFinal { + log.Info().Msgf("get last finalized snapshot") + snapshot = state.Final() + } else if flagSealed { + log.Info().Msgf("get last sealed snapshot") + snapshot = state.Sealed() + } else if flagCheckpointDir != "" { + log.Info().Msgf("get snapshot for latest checkpoint in directory %v (step: %v, endHeight: %v)", + flagCheckpointDir, flagCheckpointScanStep, flagCheckpointScanEndHeight) + var protocolSnapshot protocol.Snapshot + var sealedHeight uint64 + var sealedCommit flow.StateCommitment + var checkpointFile string + if flagCheckpointScanEndHeight < 0 { + // using default end height which is the last sealed height + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( + log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep) + } else { + // using customized end height + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( + log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep, uint64(flagCheckpointScanEndHeight)) + } + if err != nil { + return fmt.Errorf("could not generate protocol snapshot for checkpoint in dir: %v: %w", flagCheckpointDir, err) + } + + snapshot = protocolSnapshot + log.Info().Msgf("snapshot found for checkpoint file %v, sealed height %v, commit %x", checkpointFile, sealedHeight, sealedCommit) + } - head, err := snapshot.Head() - if err != nil { - log.Fatal().Err(err).Msg("fail to get block of snapshot") - } + head, err := snapshot.Head() + if err != nil { + return fmt.Errorf("fail to get block of snapshot: %w", err) + } - log.Info().Msgf("creating snapshot for block height %v, id %v", head.Height, head.ID()) + log.Info().Msgf("creating snapshot for block height %v, id %v", head.Height, head.ID()) - serializable, err := inmem.FromSnapshot(snapshot) - if err != nil { - log.Fatal().Err(err).Msg("fail to serialize snapshot") - } + serializable, err := inmem.FromSnapshot(snapshot) + if err != nil { + return fmt.Errorf("fail to serialize snapshot: %w", err) + } - sealingSegment, err := serializable.SealingSegment() - if err != nil { - log.Fatal().Err(err).Msg("could not get sealing segment") - } + sealingSegment, err := serializable.SealingSegment() + if err != nil { + return fmt.Errorf("could not get sealing segment: %w", err) + } - log.Info().Msgf("snapshot created, sealed height %v, id %v", - sealingSegment.Sealed().Header.Height, sealingSegment.Sealed().Header.ID()) + log.Info().Msgf("snapshot created, sealed height %v, id %v", + sealingSegment.Sealed().Header.Height, sealingSegment.Sealed().Header.ID()) - log.Info().Msgf("highest finalized height %v, id %v", - sealingSegment.Highest().Header.Height, sealingSegment.Highest().Header.ID()) + log.Info().Msgf("highest finalized height %v, id %v", + sealingSegment.Highest().Header.Height, sealingSegment.Highest().Header.ID()) - encoded := serializable.Encodable() - common.PrettyPrint(encoded) + encoded := serializable.Encodable() + common.PrettyPrint(encoded) + return nil + }) } diff --git a/cmd/util/cmd/reindex/cmd/results.go b/cmd/util/cmd/reindex/cmd/results.go index 40e9638816d..538e0593312 100644 --- a/cmd/util/cmd/reindex/cmd/results.go +++ b/cmd/util/cmd/reindex/cmd/results.go @@ -5,6 +5,7 @@ import ( "github.com/spf13/cobra" 
"github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/storage" ) func init() { @@ -15,12 +16,16 @@ var resultsCmd = &cobra.Command{ Use: "results", Short: "reindex sealed result IDs by block ID", Run: func(cmd *cobra.Command, args []string) { - db := common.InitStorage(flagDatadir) + lockManager := storage.MakeSingletonLockManager() + db, err := common.InitStorage(flagDatadir) + if err != nil { + log.Fatal().Err(err).Msg("could not initialize storage") + } defer db.Close() storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") + log.Fatal().Err(err).Msg("could not open protocol state") } results := storages.Results diff --git a/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go b/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go index 5cb82c63dc5..d62dc9021eb 100644 --- a/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go +++ b/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go @@ -6,7 +6,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" ) func run(*cobra.Command, []string) { @@ -14,10 +14,15 @@ func run(*cobra.Command, []string) { Str("datadir", flagDatadir). Msg("flags") - db := common.InitStorage(flagDatadir) + db, err := common.InitStorage(flagDatadir) + if err != nil { + log.Fatal().Err(err).Msg("could not initialize storage") + } defer db.Close() - err := db.Update(operation.RemoveExecutionForkEvidence()) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.RemoveExecutionForkEvidence(rw.Writer()) + }) // for testing purpose // expectedSeals := unittest.IncorporatedResultSeal.Fixtures(2) diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go index 5a4a564905a..e4214e846f8 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go @@ -13,7 +13,6 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/operation/pebbleimpl" storagepebble "github.com/onflow/flow-go/storage/pebble" "github.com/onflow/flow-go/storage/store" @@ -28,7 +27,7 @@ var ( var Cmd = &cobra.Command{ Use: "rollback-executed-height", Short: "Rollback the executed height", - Run: run, + RunE: runE, } func init() { @@ -47,7 +46,9 @@ func init() { _ = Cmd.MarkFlagRequired("chunk_data_pack_dir") } -func run(*cobra.Command, []string) { +func runE(*cobra.Command, []string) error { + lockManager := storage.MakeSingletonLockManager() + log.Info(). Str("datadir", flagDataDir). Str("chunk_data_pack_dir", flagChunkDataPackDir). @@ -57,17 +58,19 @@ func run(*cobra.Command, []string) { if flagHeight == 0 { // this would be a mistake that the height flag is used but no height value // was specified, so the default value 0 is used. 
- log.Fatal().Msg("height must be above 0") + return fmt.Errorf("height must be above 0: %v", flagHeight) } - bdb := common.InitStorage(flagDataDir) - storages := common.InitStorages(bdb) - state, err := common.InitProtocolState(bdb, storages) + db, err := common.InitStorage(flagDataDir) + if err != nil { + return err + } + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - log.Fatal().Err(err).Msg("could not init protocol states") + return fmt.Errorf("could not open protocol states: %w", err) } - db := badgerimpl.ToDB(bdb) metrics := &metrics.NoopCollector{} transactionResults := store.NewTransactionResults(metrics, db, badger.DefaultCacheSize) @@ -75,13 +78,13 @@ func run(*cobra.Command, []string) { results := store.NewExecutionResults(metrics, db) receipts := store.NewExecutionReceipts(metrics, db, results, badger.DefaultCacheSize) myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) - headers := badger.NewHeaders(metrics, bdb) + headers := store.NewHeaders(metrics, db) events := store.NewEvents(metrics, db) serviceEvents := store.NewServiceEvents(metrics, db) - transactions := badger.NewTransactions(metrics, bdb) - collections := badger.NewCollections(bdb, transactions) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) // require the chunk data pack data must exist before returning the storage module - chunkDataPacksPebbleDB, err := storagepebble.MustOpenDefaultPebbleDB( + chunkDataPacksPebbleDB, err := storagepebble.ShouldOpenDefaultPebbleDB( log.Logger.With().Str("pebbledb", "cdp").Logger(), flagChunkDataPackDir) if err != nil { log.Fatal().Err(err).Msgf("could not open chunk data pack DB at %v", flagChunkDataPackDir) @@ -134,6 +137,7 @@ func run(*cobra.Command, []string) { log.Info().Msgf("executed height rolled back to %v", flagHeight) + return nil } // use badger instances directly instead of stroage interfaces so that the interface don't diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go index be1b121b267..538b12c5f91 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go @@ -4,8 +4,9 @@ import ( "context" "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/execution/state" @@ -13,6 +14,7 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/operation/pebbleimpl" @@ -25,33 +27,39 @@ import ( func TestReExecuteBlock(t *testing.T) { unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() // bootstrap to init highest executed height bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) - genesis := unittest.BlockHeaderFixture() - rootSeal := unittest.Seal.Fixture(unittest.Seal.WithBlock(genesis)) + genesis := unittest.BlockFixture() + rootSeal := 
unittest.Seal.Fixture(unittest.Seal.WithBlock(genesis.Header)) db := badgerimpl.ToDB(bdb) - err := bootstrapper.BootstrapExecutionDatabase(db, rootSeal) + err := bootstrapper.BootstrapExecutionDatabase(lockManager, db, rootSeal) require.NoError(t, err) // create all modules metrics := &metrics.NoopCollector{} - headers := bstorage.NewHeaders(metrics, bdb) - txResults := store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) + all := store.InitAll(metrics, db) + headers := all.Headers + blocks := all.Blocks + txResults := store.NewTransactionResults(metrics, db, store.DefaultCacheSize) commits := store.NewCommits(metrics, db) - chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), bstorage.NewCollections(bdb, bstorage.NewTransactions(metrics, bdb)), bstorage.DefaultCacheSize) - results := store.NewExecutionResults(metrics, db) - receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) + chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), store.NewCollections(db, store.NewTransactions(metrics, db)), store.DefaultCacheSize) + results := all.Results + receipts := all.Receipts myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) events := store.NewEvents(metrics, db) serviceEvents := store.NewServiceEvents(metrics, db) - err = headers.Store(genesis) - require.NoError(t, err) + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, &genesis) + }) + }) getLatestFinalized := func() (uint64, error) { - return genesis.Height, nil + return genesis.Header.Height, nil } // create execution state module @@ -71,13 +79,19 @@ func TestReExecuteBlock(t *testing.T) { trace.NewNoopTracer(), nil, false, + lockManager, ) require.NotNil(t, es) computationResult := testutil.ComputationResultFixture(t) header := computationResult.Block.Header - err = headers.Store(header) + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx2, rw, computationResult.Block) + }) + lctx2.Release() require.NoError(t, err) // save execution results @@ -162,136 +176,143 @@ func TestReExecuteBlock(t *testing.T) { // Test save block execution related data, then remove it, and then // save again with different result should work func TestReExecuteBlockWithDifferentResult(t *testing.T) { - unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { - unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { - - // bootstrap to init highest executed height - bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) - genesis := unittest.BlockHeaderFixture() - rootSeal := unittest.Seal.Fixture() - unittest.Seal.WithBlock(genesis)(rootSeal) - - db := badgerimpl.ToDB(bdb) - err := bootstrapper.BootstrapExecutionDatabase(db, rootSeal) - require.NoError(t, err) - - // create all modules - metrics := &metrics.NoopCollector{} - - headers := bstorage.NewHeaders(metrics, bdb) - txResults := store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) - commits := store.NewCommits(metrics, db) - results := store.NewExecutionResults(metrics, db) - receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) - events := store.NewEvents(metrics, db) - serviceEvents := 
store.NewServiceEvents(metrics, db) - transactions := bstorage.NewTransactions(metrics, bdb) - collections := bstorage.NewCollections(bdb, transactions) - chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, bstorage.DefaultCacheSize) - - err = headers.Store(genesis) - require.NoError(t, err) - - getLatestFinalized := func() (uint64, error) { - return genesis.Height, nil - } - - // create execution state module - es := state.NewExecutionState( - nil, - commits, - nil, - headers, - chunkDataPacks, - results, - myReceipts, - events, - serviceEvents, - txResults, - db, - getLatestFinalized, - trace.NewNoopTracer(), - nil, - false, - ) - require.NotNil(t, es) - - executableBlock := unittest.ExecutableBlockFixtureWithParent( - nil, - genesis, - &unittest.GenesisStateCommitment) - header := executableBlock.Block.Header - - err = headers.Store(header) - require.NoError(t, err) - - computationResult := testutil.ComputationResultFixture(t) - computationResult.ExecutableBlock = executableBlock - computationResult.ExecutionReceipt.ExecutionResult.BlockID = header.ID() - - // save execution results - err = es.SaveExecutionResults(context.Background(), computationResult) - require.NoError(t, err) - - batch := db.NewBatch() - defer batch.Close() - - chunkBatch := pebbleimpl.ToDB(pdb).NewBatch() - defer chunkBatch.Close() - - // remove execution results - err = removeForBlockID( - batch, - chunkBatch, - commits, - txResults, - results, - chunkDataPacks, - myReceipts, - events, - serviceEvents, - header.ID(), - ) - - require.NoError(t, err) - require.NoError(t, chunkBatch.Commit()) - err2 := batch.Commit() - require.NoError(t, err2) - - batch = db.NewBatch() - defer batch.Close() - - chunkBatch = pebbleimpl.ToDB(pdb).NewBatch() - defer chunkBatch.Close() - - // remove again to test for duplicates handling - err = removeForBlockID( - batch, - chunkBatch, - commits, - txResults, - results, - chunkDataPacks, - myReceipts, - events, - serviceEvents, - header.ID(), - ) - - require.NoError(t, err) - require.NoError(t, chunkBatch.Commit()) - - err2 = batch.Commit() - require.NoError(t, err2) - - computationResult2 := testutil.ComputationResultFixture(t) - computationResult2.ExecutableBlock = executableBlock - computationResult2.ExecutionResult.BlockID = header.ID() + lockManager := storage.NewTestingLockManager() + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + + // bootstrap to init highest executed height + bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) + genesis := unittest.BlockFixture() + rootSeal := unittest.Seal.Fixture() + unittest.Seal.WithBlock(genesis.Header)(rootSeal) + + db := pebbleimpl.ToDB(pdb) + err := bootstrapper.BootstrapExecutionDatabase(lockManager, db, rootSeal) + require.NoError(t, err) + + // create all modules + metrics := &metrics.NoopCollector{} + all := store.InitAll(metrics, db) + headers := all.Headers + blocks := all.Blocks + commits := store.NewCommits(metrics, db) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + events := store.NewEvents(metrics, db) + serviceEvents := store.NewServiceEvents(metrics, db) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, bstorage.DefaultCacheSize) + txResults := 
store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) + + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, &genesis) + }) + }) - // re execute result - err = es.SaveExecutionResults(context.Background(), computationResult2) - require.NoError(t, err) + getLatestFinalized := func() (uint64, error) { + return genesis.Header.Height, nil + } + + // create execution state module + es := state.NewExecutionState( + nil, + commits, + nil, + headers, + chunkDataPacks, + results, + myReceipts, + events, + serviceEvents, + txResults, + db, + getLatestFinalized, + trace.NewNoopTracer(), + nil, + false, + lockManager, + ) + require.NotNil(t, es) + + executableBlock := unittest.ExecutableBlockFixtureWithParent( + nil, + genesis.Header, + &unittest.GenesisStateCommitment) + header := executableBlock.Block.Header + + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, executableBlock.Block) + }) }) + + computationResult := testutil.ComputationResultFixture(t) + computationResult.ExecutableBlock = executableBlock + computationResult.ExecutionReceipt.ExecutionResult.BlockID = header.ID() + + // save execution results + err = es.SaveExecutionResults(context.Background(), computationResult) + require.NoError(t, err) + + batch := db.NewBatch() + defer batch.Close() + + chunkBatch := db.NewBatch() + defer chunkBatch.Close() + + // remove execution results + err = removeForBlockID( + batch, + chunkBatch, + commits, + txResults, + results, + chunkDataPacks, + myReceipts, + events, + serviceEvents, + header.ID(), + ) + + require.NoError(t, err) + require.NoError(t, chunkBatch.Commit()) + err2 := batch.Commit() + require.NoError(t, err2) + + batch = db.NewBatch() + defer batch.Close() + + chunkBatch = db.NewBatch() + defer chunkBatch.Close() + + // remove again to test for duplicates handling + err = removeForBlockID( + batch, + chunkBatch, + commits, + txResults, + results, + chunkDataPacks, + myReceipts, + events, + serviceEvents, + header.ID(), + ) + + require.NoError(t, err) + require.NoError(t, chunkBatch.Commit()) + + err2 = batch.Commit() + require.NoError(t, err2) + + computationResult2 := testutil.ComputationResultFixture(t) + computationResult2.ExecutableBlock = executableBlock + computationResult2.ExecutionResult.BlockID = header.ID() + + // re execute result + err = es.SaveExecutionResults(context.Background(), computationResult2) + require.NoError(t, err) }) } diff --git a/cmd/util/cmd/rollback-executed-height/cmd/root.go b/cmd/util/cmd/rollback-executed-height/cmd/root.go index f2940816fdf..47cef5b28b2 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/root.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/root.go @@ -16,7 +16,7 @@ var ( var rootCmd = &cobra.Command{ Use: "rollback-executed-height", Short: "rollback executed height", - Run: run, + RunE: runE, } func Execute() { diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index 44b92410ade..ccb4392fe45 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -17,6 +17,7 @@ import ( checkpoint_collect_stats "github.com/onflow/flow-go/cmd/util/cmd/checkpoint-collect-stats" checkpoint_list_tries "github.com/onflow/flow-go/cmd/util/cmd/checkpoint-list-tries" checkpoint_trie_stats 
"github.com/onflow/flow-go/cmd/util/cmd/checkpoint-trie-stats" + db_migration "github.com/onflow/flow-go/cmd/util/cmd/db-migration" debug_script "github.com/onflow/flow-go/cmd/util/cmd/debug-script" debug_tx "github.com/onflow/flow-go/cmd/util/cmd/debug-tx" diff_states "github.com/onflow/flow-go/cmd/util/cmd/diff-states" @@ -32,6 +33,7 @@ import ( find_trie_root "github.com/onflow/flow-go/cmd/util/cmd/find-trie-root" generate_authorization_fixes "github.com/onflow/flow-go/cmd/util/cmd/generate-authorization-fixes" "github.com/onflow/flow-go/cmd/util/cmd/leaders" + pebble_checkpoint "github.com/onflow/flow-go/cmd/util/cmd/pebble-checkpoint" read_badger "github.com/onflow/flow-go/cmd/util/cmd/read-badger/cmd" read_execution_state "github.com/onflow/flow-go/cmd/util/cmd/read-execution-state" read_hotstuff "github.com/onflow/flow-go/cmd/util/cmd/read-hotstuff/cmd" @@ -132,6 +134,8 @@ func addCommands() { rootCmd.AddCommand(evm_state_exporter.Cmd) rootCmd.AddCommand(verify_execution_result.Cmd) rootCmd.AddCommand(verify_evm_offchain_replay.Cmd) + rootCmd.AddCommand(pebble_checkpoint.Cmd) + rootCmd.AddCommand(db_migration.Cmd) } func initConfig() { diff --git a/cmd/util/cmd/snapshot/cmd.go b/cmd/util/cmd/snapshot/cmd.go index e45102ad09d..394bd244e0c 100644 --- a/cmd/util/cmd/snapshot/cmd.go +++ b/cmd/util/cmd/snapshot/cmd.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/storage" ) var ( @@ -43,14 +44,18 @@ func init() { } func run(*cobra.Command, []string) { + lockManager := storage.MakeSingletonLockManager() - db := common.InitStorage(flagDatadir) + db, err := common.InitStorage(flagDatadir) + if err != nil { + log.Fatal().Err(err).Msg("could not init storage") + } defer db.Close() storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") + log.Fatal().Err(err).Msg("could not open protocol state") } log := log.With().Uint64("block_height", flagHeight).Logger() diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index 47b34c72afa..a86e61ca1b5 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -6,7 +6,6 @@ import ( "os" "path/filepath" - "github.com/dgraph-io/badger/v2" badgerds "github.com/ipfs/go-ds-badger2" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -18,6 +17,7 @@ import ( "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) // Verify verifies the offchain replay of EVM blocks from the given height range @@ -139,18 +139,21 @@ func loadState(height uint64, gobDir string) (*testutils.TestValueStore, error) } func initStorages(dataDir string, executionDataDir string) ( - *badger.DB, - *storage.All, + storage.DB, + *store.All, execution_data.ExecutionDataGetter, io.Closer, error, ) { - db := common.InitStorage(dataDir) + db, err := common.InitStorage(dataDir) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("could not initialize storage: %w", err) + } storages := common.InitStorages(db) datastoreDir := filepath.Join(executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) + err = os.MkdirAll(datastoreDir, 0700) 
if err != nil { return nil, nil, nil, nil, err } diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go index 911eaa617ff..62123f0b17c 100644 --- a/cmd/util/cmd/verify_execution_result/cmd.go +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -10,16 +10,18 @@ import ( "github.com/onflow/flow-go/engine/verification/verifier" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) var ( - flagLastK uint64 - flagDatadir string - flagChunkDataPackDir string - flagChain string - flagFromTo string - flagWorkerCount uint // number of workers to verify the blocks concurrently - flagStopOnMismatch bool + flagLastK uint64 + flagDatadir string + flagChunkDataPackDir string + flagChain string + flagFromTo string + flagWorkerCount uint // number of workers to verify the blocks concurrently + flagStopOnMismatch bool + flagtransactionFeesDisabled bool ) // # verify the last 100 sealed blocks @@ -54,9 +56,12 @@ func init() { "number of workers to use for verification, default is 1") Cmd.Flags().BoolVar(&flagStopOnMismatch, "stop_on_mismatch", false, "stop verification on first mismatch") + + Cmd.Flags().BoolVar(&flagtransactionFeesDisabled, "fees_disabled", false, "disable transaction fees") } func run(*cobra.Command, []string) { + lockManager := storage.MakeSingletonLockManager() chainID := flow.ChainID(flagChain) _ = chainID.Chain() @@ -81,7 +86,7 @@ func run(*cobra.Command, []string) { } lg.Info().Msgf("verifying range from %d to %d", from, to) - err = verifier.VerifyRange(from, to, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch) + err = verifier.VerifyRange(lockManager, from, to, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch, flagtransactionFeesDisabled) if err != nil { lg.Fatal().Err(err).Msgf("could not verify range from %d to %d", from, to) } @@ -89,7 +94,7 @@ func run(*cobra.Command, []string) { } else { lg.Info().Msgf("verifying last %d sealed blocks", flagLastK) - err := verifier.VerifyLastKHeight(flagLastK, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch) + err := verifier.VerifyLastKHeight(lockManager, flagLastK, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch, flagtransactionFeesDisabled) if err != nil { lg.Fatal().Err(err).Msg("could not verify last k height") } diff --git a/cmd/util/ledger/migrations/add_key_migration.go b/cmd/util/ledger/migrations/add_key_migration.go index d0f9f87fb9d..2fcca8db493 100644 --- a/cmd/util/ledger/migrations/add_key_migration.go +++ b/cmd/util/ledger/migrations/add_key_migration.go @@ -149,12 +149,12 @@ func (m *AddKeyMigration) MigrateAccount( flowAddress := flow.ConvertAddress(address) - keyIndex, err := migrationRuntime.Accounts.GetPublicKeyCount(flowAddress) + keyIndex, err := migrationRuntime.Accounts.GetAccountPublicKeyCount(flowAddress) if err != nil { return fmt.Errorf("failed to get public key count: %w", err) } - err = migrationRuntime.Accounts.AppendPublicKey(flowAddress, key) + err = migrationRuntime.Accounts.AppendAccountPublicKey(flowAddress, key) if err != nil { return fmt.Errorf("failed to append public key: %w", err) } diff --git a/cmd/util/ledger/migrations/storage_used_migration_test.go b/cmd/util/ledger/migrations/storage_used_migration_test.go index b0196dc96b8..5b8f0d4ffc9 100644 --- a/cmd/util/ledger/migrations/storage_used_migration_test.go +++ b/cmd/util/ledger/migrations/storage_used_migration_test.go @@ -95,7 +95,7 @@ func 
TestAccountStatusMigration(t *testing.T) { require.Equal(t, sizeOfTheStatusPayload, accountStatus.StorageUsed()) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) - require.Equal(t, uint32(5), accountStatus.PublicKeyCount()) + require.Equal(t, uint32(5), accountStatus.AccountPublicKeyCount()) require.Equal(t, uint64(0), accountStatus.AccountIdCounter()) }) t.Run("status register v2", func(t *testing.T) { @@ -123,7 +123,7 @@ func TestAccountStatusMigration(t *testing.T) { require.Equal(t, sizeOfTheStatusPayload, accountStatus.StorageUsed()) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) - require.Equal(t, uint32(5), accountStatus.PublicKeyCount()) + require.Equal(t, uint32(5), accountStatus.AccountPublicKeyCount()) require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) }) @@ -152,7 +152,7 @@ func TestAccountStatusMigration(t *testing.T) { require.Equal(t, sizeOfTheStatusPayload, accountStatus.StorageUsed()) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) - require.Equal(t, uint32(5), accountStatus.PublicKeyCount()) + require.Equal(t, uint32(5), accountStatus.AccountPublicKeyCount()) require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) }) @@ -202,7 +202,7 @@ func TestAccountStatusMigration(t *testing.T) { require.Equal(t, sizeOfTheStatusPayload+dataRegisterSize, accountStatus.StorageUsed()) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) - require.Equal(t, uint32(5), accountStatus.PublicKeyCount()) + require.Equal(t, uint32(5), accountStatus.AccountPublicKeyCount()) require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) }) } diff --git a/cmd/verification_builder.go b/cmd/verification_builder.go index 7e6c8d0fcfe..3ba1283639b 100644 --- a/cmd/verification_builder.go +++ b/cmd/verification_builder.go @@ -38,7 +38,6 @@ import ( badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/dbops" "github.com/onflow/flow-go/storage/store" ) @@ -169,15 +168,8 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { var ok bool var err error - if dbops.IsBadgerTransaction(node.DBOps) { - queue := badger.NewChunkQueue(node.DB) - ok, err = queue.Init(chunkconsumer.DefaultJobIndex) - if err != nil { - return fmt.Errorf("could not initialize default index in chunks queue: %w", err) - } - - chunkQueue = queue - node.Logger.Info().Msgf("chunks queue index has been initialized with badger db transaction updates") + if dbops.IsBadgerTransaction(v.DBOps) { + return fmt.Errorf("badger transaction is not supported for chunks queue") } else if dbops.IsBatchUpdate(node.DBOps) { queue := store.NewChunkQueue(node.Metrics.Cache, node.ProtocolDB) ok, err = queue.Init(chunkconsumer.DefaultJobIndex) @@ -226,9 +218,9 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { var approvalStorage storage.ResultApprovals if dbops.IsBadgerTransaction(v.DBOps) { - approvalStorage = badger.NewResultApprovals(node.Metrics.Cache, node.DB) + return nil, fmt.Errorf("badger transaction is not supported for approval storage") } else if dbops.IsBatchUpdate(v.DBOps) { - approvalStorage = store.NewResultApprovals(node.Metrics.Cache, node.ProtocolDB) + approvalStorage = store.NewResultApprovals(node.Metrics.Cache, node.ProtocolDB, node.StorageLockMgr) } else { return nil, 
fmt.Errorf("invalid db opts type: %v", v.DBOps) } @@ -241,7 +233,9 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { node.State, node.Me, chunkVerifier, - approvalStorage) + approvalStorage, + node.StorageLockMgr, + ) return verifierEng, err }). Component("chunk consumer, requester, and fetcher engines", func(node *NodeConfig) (module.ReadyDoneAware, error) { @@ -363,7 +357,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { Component("follower core", func(node *NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that handles updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, followerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, followerState, node.Tracer) finalized, pending, err := recoveryprotocol.FindLatest(node.State, node.Storage.Headers) if err != nil { diff --git a/consensus/follower_test.go b/consensus/follower_test.go index 0efed602dfa..66beebcf872 100644 --- a/consensus/follower_test.go +++ b/consensus/follower_test.go @@ -167,8 +167,7 @@ func (s *HotStuffFollowerSuite) TestOnBlockIncorporated() { rootBlockView := s.rootHeader.View child := s.mockConsensus.extendBlock(rootBlockView+2, s.rootHeader) grandChild := s.mockConsensus.extendBlock(child.View+2, child) - - certifiedChild := toCertifiedBlock(s.T(), child, grandChild.QuorumCertificate()) + certifiedChild := toCertifiedBlock(s.T(), child, grandChild.ParentQC()) blockIngested := make(chan struct{}) // close when child was ingested s.notifier.On("OnBlockIncorporated", blockWithID(child.ID())).Run(func(_ mock.Arguments) { close(blockIngested) @@ -205,13 +204,13 @@ func (s *HotStuffFollowerSuite) TestFollowerFinalizedBlock() { d := s.mockConsensus.extendBlock(c.View+1, c) // adding b should not advance finality - bCertified := toCertifiedBlock(s.T(), b, c.QuorumCertificate()) + bCertified := toCertifiedBlock(s.T(), b, c.ParentQC()) s.notifier.On("OnBlockIncorporated", blockWithID(b.ID())).Return().Once() s.follower.AddCertifiedBlock(bCertified) // adding the certified child of b should advance finality to b finalityAdvanced := make(chan struct{}) // close when finality has advanced to b - certifiedChild := toCertifiedBlock(s.T(), c, d.QuorumCertificate()) + certifiedChild := toCertifiedBlock(s.T(), c, d.ParentQC()) s.notifier.On("OnBlockIncorporated", blockWithID(certifiedChild.ID())).Return().Once() s.finalizer.On("MakeFinal", blockID(b.ID())).Return(nil).Once() s.notifier.On("OnFinalizedBlock", blockWithID(b.ID())).Run(func(_ mock.Arguments) { @@ -279,13 +278,13 @@ func (s *HotStuffFollowerSuite) TestOutOfOrderBlocks() { // now we feed the blocks in some wild view order into the Follower // (Caution: we still have to make sure the parent is known, before we give its child to the Follower) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block03, block04.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block07, block08.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block11, block17.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block01, block02.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block05, block06.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block09, block10.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block13, block14.QuorumCertificate())) 
+ s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block03, block04.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block07, block08.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block11, block17.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block01, block02.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block05, block06.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block09, block10.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block13, block14.ParentQC())) // Block 20 should now finalize the fork up to and including block13 finalityAdvanced := make(chan struct{}) // close when finality has advanced to b @@ -300,7 +299,7 @@ func (s *HotStuffFollowerSuite) TestOutOfOrderBlocks() { close(finalityAdvanced) }).Return(nil).Once() - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block14, block20.QuorumCertificate())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block14, block20.ParentQC())) unittest.RequireCloseBefore(s.T(), finalityAdvanced, time.Second, "expect finality progress before timeout") } diff --git a/consensus/hotstuff/consumer.go b/consensus/hotstuff/consumer.go index 2fcf2e4703e..32a37a3470a 100644 --- a/consensus/hotstuff/consumer.go +++ b/consensus/hotstuff/consumer.go @@ -307,7 +307,7 @@ type CommunicatorConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) + OnOwnVote(vote *model.Vote, recipientID flow.Identifier) // OnOwnTimeout notifies about intent to broadcast the given timeout object(TO) to all actors of the consensus process. 
// Prerequisites: diff --git a/consensus/hotstuff/eventhandler/event_handler.go b/consensus/hotstuff/eventhandler/event_handler.go index d119ee2b681..1e08b544a05 100644 --- a/consensus/hotstuff/eventhandler/event_handler.go +++ b/consensus/hotstuff/eventhandler/event_handler.go @@ -507,7 +507,6 @@ func (e *EventHandler) ownVote(proposal *model.SignedProposal, curView uint64, n } log.Debug().Msg("forwarding vote to compliance engine") - // raise a notification to send vote - e.notifier.OnOwnVote(ownVote.BlockID, ownVote.View, ownVote.SigData, nextLeader) + e.notifier.OnOwnVote(ownVote, nextLeader) return nil } diff --git a/consensus/hotstuff/eventhandler/event_handler_test.go b/consensus/hotstuff/eventhandler/event_handler_test.go index c9168b8afcd..da0e782fc93 100644 --- a/consensus/hotstuff/eventhandler/event_handler_test.go +++ b/consensus/hotstuff/eventhandler/event_handler_test.go @@ -433,7 +433,12 @@ func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NextLeader() { // proposal is safe to vote es.safetyRules.votable[proposal.Block.BlockID] = struct{}{} - es.notifier.On("OnOwnVote", proposal.Block.BlockID, proposal.Block.View, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + BlockID: proposal.Block.BlockID, + View: proposal.Block.View, + } + + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() // vote should be created for this proposal err := es.eventhandler.OnReceiveProposal(proposal) @@ -449,7 +454,13 @@ func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NotNextLeader() { // proposal is safe to vote es.safetyRules.votable[proposal.Block.BlockID] = struct{}{} - es.notifier.On("OnOwnVote", proposal.Block.BlockID, mock.Anything, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + BlockID: proposal.Block.BlockID, + View: proposal.Block.View, + SignerID: flow.ZeroID, + } + + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() // vote should be created for this proposal err := es.eventhandler.OnReceiveProposal(proposal) @@ -809,7 +820,11 @@ func (es *EventHandlerSuite) TestLeaderBuild100Blocks() { require.True(es.T(), ok) require.Equal(es.T(), proposal.Block.View+1, header.View) }).Once() - es.notifier.On("OnOwnVote", proposal.Block.BlockID, proposal.Block.View, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + View: proposal.Block.View, + BlockID: proposal.Block.BlockID, + } + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() err := es.eventhandler.OnReceiveProposal(proposal) require.NoError(es.T(), err) @@ -1029,8 +1044,8 @@ func createQC(parent *model.Block) *flow.QuorumCertificate { qc := &flow.QuorumCertificate{ BlockID: parent.BlockID, View: parent.View, - SignerIndices: nil, - SigData: nil, + SignerIndices: unittest.SignerIndicesFixture(3), + SigData: unittest.SignatureFixture(), } return qc } diff --git a/consensus/hotstuff/helper/timeout_certificate.go b/consensus/hotstuff/helper/timeout_certificate.go index 42ffe64d8d9..a6d4af3b10f 100644 --- a/consensus/hotstuff/helper/timeout_certificate.go +++ b/consensus/hotstuff/helper/timeout_certificate.go @@ -9,14 +9,15 @@ import ( ) func MakeTC(options ...func(*flow.TimeoutCertificate)) *flow.TimeoutCertificate { - qc := MakeQC() + tcView := rand.Uint64() + qc := MakeQC(WithQCView(tcView - 1)) signerIndices := unittest.SignerIndicesFixture(3) highQCViews := make([]uint64, 3) for i := range highQCViews { highQCViews[i] = qc.View } tc := flow.TimeoutCertificate{ - View: rand.Uint64(), + View: tcView, NewestQC: qc, NewestQCViews: []uint64{qc.View}, SignerIndices: signerIndices, @@ 
-54,12 +55,18 @@ func WithTCHighQCViews(highQCViews []uint64) func(*flow.TimeoutCertificate) { } func TimeoutObjectFixture(opts ...func(TimeoutObject *hotstuff.TimeoutObject)) *hotstuff.TimeoutObject { + timeoutView := uint64(rand.Uint32()) + newestQC := MakeQC(WithQCView(timeoutView - 10)) + timeout := &hotstuff.TimeoutObject{ - View: uint64(rand.Uint32()), - NewestQC: MakeQC(), - LastViewTC: MakeTC(), - SignerID: unittest.IdentifierFixture(), - SigData: unittest.RandomBytes(128), + View: timeoutView, + NewestQC: newestQC, + LastViewTC: MakeTC( + WithTCView(timeoutView-1), + WithTCNewestQC(MakeQC(WithQCView(newestQC.View))), + ), + SignerID: unittest.IdentifierFixture(), + SigData: unittest.RandomBytes(128), } for _, opt := range opts { diff --git a/consensus/hotstuff/integration/connect_test.go b/consensus/hotstuff/integration/connect_test.go index 177a8d0244b..f9588541c8d 100644 --- a/consensus/hotstuff/integration/connect_test.go +++ b/consensus/hotstuff/integration/connect_test.go @@ -64,19 +64,12 @@ func Connect(t *testing.T, instances []*Instance) { } }, ) - sender.notifier.On("OnOwnVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run( + sender.notifier.On("OnOwnVote", mock.Anything, mock.Anything).Run( func(args mock.Arguments) { - blockID, ok := args[0].(flow.Identifier) + vote, ok := args[0].(*model.Vote) require.True(t, ok) - view, ok := args[1].(uint64) + recipientID, ok := args[1].(flow.Identifier) require.True(t, ok) - sigData, ok := args[2].([]byte) - require.True(t, ok) - recipientID, ok := args[3].(flow.Identifier) - require.True(t, ok) - // convert into vote - vote := model.VoteFromFlow(sender.localID, blockID, view, sigData) - // get the receiver receiver, exists := lookup[recipientID] if !exists { diff --git a/consensus/hotstuff/integration/instance_test.go b/consensus/hotstuff/integration/instance_test.go index 83cdf641ff6..97a7de9f616 100644 --- a/consensus/hotstuff/integration/instance_test.go +++ b/consensus/hotstuff/integration/instance_test.go @@ -308,13 +308,9 @@ func NewInstance(t *testing.T, options ...Option) *Instance { ) // in case of single node setup we should just forward vote to our own node // for multi-node setup this method will be overridden - in.notifier.On("OnOwnVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - in.queue <- &model.Vote{ - View: args[1].(uint64), - BlockID: args[0].(flow.Identifier), - SignerID: in.localID, - SigData: args[2].([]byte), - } + in.notifier.On("OnOwnVote", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + vote := args[1].(*model.Vote) + in.queue <- vote }) // program the finalizer module behaviour @@ -356,7 +352,12 @@ func NewInstance(t *testing.T, options ...Option) *Instance { notifier.AddConsumer(in.notifier) // initialize the finalizer - rootBlock := model.BlockFromFlow(cfg.Root) + var rootBlock *model.Block + if cfg.Root.ContainsParentQC() { + rootBlock = model.BlockFromFlow(cfg.Root) + } else { + rootBlock = model.GenesisBlockFromFlow(cfg.Root) + } signerIndices, err := msig.EncodeSignersToIndices(in.participants.NodeIDs(), in.participants.NodeIDs()) require.NoError(t, err, "could not encode signer indices") @@ -365,6 +366,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { View: rootBlock.View, BlockID: rootBlock.BlockID, SignerIndices: signerIndices, + SigData: unittest.SignatureFixture(), } certifiedRootBlock, err := model.NewCertifiedBlock(rootBlock, rootQC) require.NoError(t, err) @@ -418,7 +420,10 @@ func 
NewInstance(t *testing.T, options ...Option) *Instance { minRequiredWeight, ) - err := processor.Process(proposal.ProposerVote()) + vote, err := proposal.ProposerVote() + require.NoError(t, err) + + err = processor.Process(vote) if err != nil { t.Fatalf("invalid vote for own proposal: %v", err) } diff --git a/consensus/hotstuff/mocks/communicator_consumer.go b/consensus/hotstuff/mocks/communicator_consumer.go index 231e47b6904..088ded9429a 100644 --- a/consensus/hotstuff/mocks/communicator_consumer.go +++ b/consensus/hotstuff/mocks/communicator_consumer.go @@ -27,9 +27,9 @@ func (_m *CommunicatorConsumer) OnOwnTimeout(timeout *model.TimeoutObject) { _m.Called(timeout) } -// OnOwnVote provides a mock function with given fields: blockID, view, sigData, recipientID -func (_m *CommunicatorConsumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - _m.Called(blockID, view, sigData, recipientID) +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *CommunicatorConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { + _m.Called(vote, recipientID) } // NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/consensus/hotstuff/mocks/consumer.go b/consensus/hotstuff/mocks/consumer.go index 324516e2193..3efb12d51be 100644 --- a/consensus/hotstuff/mocks/consumer.go +++ b/consensus/hotstuff/mocks/consumer.go @@ -63,9 +63,9 @@ func (_m *Consumer) OnOwnTimeout(timeout *model.TimeoutObject) { _m.Called(timeout) } -// OnOwnVote provides a mock function with given fields: blockID, view, sigData, recipientID -func (_m *Consumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - _m.Called(blockID, view, sigData, recipientID) +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *Consumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { + _m.Called(vote, recipientID) } // OnPartialTc provides a mock function with given fields: currentView, partialTc diff --git a/consensus/hotstuff/model/block.go b/consensus/hotstuff/model/block.go index 6c682514dfc..1d170239003 100644 --- a/consensus/hotstuff/model/block.go +++ b/consensus/hotstuff/model/block.go @@ -23,8 +23,8 @@ func BlockFromFlow(header *flow.Header) *Block { block := Block{ BlockID: header.ID(), View: header.View, - QC: header.QuorumCertificate(), ProposerID: header.ProposerID, + QC: header.ParentQC(), PayloadHash: header.PayloadHash, Timestamp: header.Timestamp, } diff --git a/consensus/hotstuff/model/proposal.go b/consensus/hotstuff/model/proposal.go index 6dcca721583..d05f69b648e 100644 --- a/consensus/hotstuff/model/proposal.go +++ b/consensus/hotstuff/model/proposal.go @@ -45,14 +45,14 @@ type SignedProposal struct { } // ProposerVote extracts the proposer vote from the proposal -func (p *SignedProposal) ProposerVote() *Vote { - vote := Vote{ +// All errors indicate a valid Vote cannot be constructed from the receiver SignedProposal. +func (p *SignedProposal) ProposerVote() (*Vote, error) { + return NewVote(UntrustedVote{ View: p.Block.View, BlockID: p.Block.BlockID, SignerID: p.Block.ProposerID, SigData: p.SigData, - } - return &vote + }) } // SignedProposalFromFlow turns a flow header into a hotstuff block type. 
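Note on the `ProposerVote` signature change above: call sites now receive `(*model.Vote, error)` instead of a bare `*Vote`. The following is a minimal sketch, not part of this patch, of how a caller might adapt; the helper name `proposerVoteOrError` is purely illustrative.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
)

// proposerVoteOrError shows one way a call site can handle the now-fallible
// ProposerVote: any error is treated as unexpected, since a SignedProposal we
// hold should always yield a structurally valid proposer vote.
func proposerVoteOrError(proposal *model.SignedProposal) (*model.Vote, error) {
	vote, err := proposal.ProposerVote()
	if err != nil {
		// no expected errors during normal operations
		return nil, fmt.Errorf("could not extract proposer vote from proposal: %w", err)
	}
	return vote, nil
}
```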
diff --git a/consensus/hotstuff/model/timeout.go b/consensus/hotstuff/model/timeout.go index a40f3cca680..e2c5252ffb5 100644 --- a/consensus/hotstuff/model/timeout.go +++ b/consensus/hotstuff/model/timeout.go @@ -36,6 +36,8 @@ type NewViewEvent TimerInfo // TimeoutObject represents intent of replica to leave its current view with a timeout. This concept is very similar to // HotStuff vote. Valid TimeoutObject is signed by staking key. +// +//structwrite:immutable - mutations allowed only within the constructor type TimeoutObject struct { // View is the view number which is replica is timing out View uint64 @@ -61,6 +63,69 @@ type TimeoutObject struct { TimeoutTick uint64 } +// UntrustedTimeoutObject is an untrusted input-only representation of a TimeoutObject, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedTimeoutObject should be validated and converted into +// a trusted TimeoutObject using NewTimeoutObject constructor. +type UntrustedTimeoutObject TimeoutObject + +// NewTimeoutObject creates a new instance of TimeoutObject. +// Construction TimeoutObject allowed only within the constructor. +// +// All errors indicate a valid TimeoutObject cannot be constructed from the input. +func NewTimeoutObject(untrusted UntrustedTimeoutObject) (*TimeoutObject, error) { + if untrusted.NewestQC == nil { + return nil, fmt.Errorf("newest QC must not be nil") + } + if untrusted.SignerID == flow.ZeroID { + return nil, fmt.Errorf("signer ID must not be zero") + } + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("signature must not be empty") + } + if untrusted.View <= untrusted.NewestQC.View { + return nil, fmt.Errorf("TO's QC %d cannot be newer than the TO's view %d", untrusted.NewestQC.View, untrusted.View) + } + + // If a TC is included, the TC must be for the past round, no matter whether a QC + // for the last round is also included. In some edge cases, a node might observe + // _both_ QC and TC for the previous round, in which case it can include both. + if untrusted.LastViewTC != nil { + if untrusted.View != untrusted.LastViewTC.View+1 { + return nil, fmt.Errorf("invalid TC for non-previous view, expected view %d, got view %d", untrusted.View-1, untrusted.LastViewTC.View) + } + if untrusted.NewestQC.View < untrusted.LastViewTC.NewestQC.View { + return nil, fmt.Errorf("timeout.NewestQC is older (view=%d) than the QC in timeout.LastViewTC (view=%d)", untrusted.NewestQC.View, untrusted.LastViewTC.NewestQC.View) + } + } + // The TO must contain a proof that sender legitimately entered View. Transitioning + // to round timeout.View is possible either by observing a QC or a TC for the previous round. + // If no QC is included, we require a TC to be present, which by check must be for + // the previous round. + lastViewSuccessful := untrusted.View == untrusted.NewestQC.View+1 + if !lastViewSuccessful { + // The TO's sender did _not_ observe a QC for round timeout.View-1. Hence, it should + // include a TC for the previous round. Otherwise, the TO is invalid. 
+ if untrusted.LastViewTC == nil { + return nil, fmt.Errorf("must include TC") + } + } + + return &TimeoutObject{ + View: untrusted.View, + NewestQC: untrusted.NewestQC, + LastViewTC: untrusted.LastViewTC, + SignerID: untrusted.SignerID, + SigData: untrusted.SigData, + TimeoutTick: untrusted.TimeoutTick, + }, nil +} + // ID returns the TimeoutObject's identifier func (t *TimeoutObject) ID() flow.Identifier { body := struct { diff --git a/consensus/hotstuff/model/timeout_test.go b/consensus/hotstuff/model/timeout_test.go new file mode 100644 index 00000000000..43af32f591e --- /dev/null +++ b/consensus/hotstuff/model/timeout_test.go @@ -0,0 +1,158 @@ +package model_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/consensus/hotstuff/helper" + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" +) + +// TestNewTimeoutObject verifies the behavior of the NewTimeoutObject constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedTimeoutObject results in a valid TimeoutObject. +// +// 2. Invalid input with nil NewestQC: +// - Ensures an error is returned when the NewestQC field is nil. +// +// 3. Invalid input with zero SignerID: +// - Ensures an error is returned when the SignerID is flow.ZeroID. +// +// 4. Invalid input with nil SigData: +// - Ensures an error is returned when the SigData field is nil. +// +// 5. Invalid input with empty SigData: +// - Ensures an error is returned when the SigData field is an empty byte slice. +// +// 6. Invalid input when View is lower than or equal to NewestQC.View: +// - Ensures an error is returned when the TimeoutObject's View is less than or equal to the included QC's View. +// +// 7. Invalid input when TC present but for wrong view: +// - Ensures an error is returned when LastViewTC.View is not one less than the TimeoutObject's View. +// +// 8. Invalid input when TC's QC newer than TimeoutObject's QC: +// - Ensures an error is returned when TimeoutObject's NewestQC.View is older than LastViewTC.NewestQC.View. +// +// 9. Invalid input when LastViewTC missing when QC does not prove previous round: +// - Ensures an error is returned when TimeoutObject lacks both a QC for previous round and a LastViewTC. 
+func TestNewTimeoutObject(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*helper.TimeoutObjectFixture())) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with nil NewestQC", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.NewestQC = nil + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC must not be nil") + }) + + t.Run("invalid input with zero SignerID", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SignerID = flow.ZeroID + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer ID must not be zero") + }) + + t.Run("invalid input with nil SigData", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SigData = nil + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with empty SigData", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SigData = []byte{} + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input when View <= NewestQC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(100)) + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutObjectView(100), // Equal to QC view + ), + )) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "TO's QC 100 cannot be newer than the TO's view 100") + }) + + t.Run("invalid input when LastViewTC.View is not View - 1", func(t *testing.T) { + tc := helper.MakeTC(helper.WithTCView(50)) + qc := helper.MakeQC(helper.WithQCView(40)) + + result, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutLastViewTC(tc), + ), + ), + ) + require.Error(t, err) + require.Nil(t, result) + assert.Contains(t, err.Error(), "invalid TC for non-previous view") + }) + + t.Run("invalid input when TimeoutObject's QC is older than TC's QC", func(t *testing.T) { + tcQC := helper.MakeQC(helper.WithQCView(150)) + tc := helper.MakeTC(helper.WithTCNewestQC(tcQC), helper.WithTCView(99)) + + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutLastViewTC(tc), + helper.WithTimeoutNewestQC(helper.MakeQC(helper.WithQCView(80))), // older than TC.NewestQC + ), + ), + ) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "timeout.NewestQC is older") + }) + + t.Run("invalid input when no QC for previous round and TC is missing", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(90)) + + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutLastViewTC(nil), + ), + ), + ) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, 
err.Error(), "must include TC") + }) +} diff --git a/consensus/hotstuff/model/vote.go b/consensus/hotstuff/model/vote.go index 0f50e96bdc8..5046028f47f 100644 --- a/consensus/hotstuff/model/vote.go +++ b/consensus/hotstuff/model/vote.go @@ -1,12 +1,14 @@ package model import ( - "github.com/onflow/crypto" + "fmt" "github.com/onflow/flow-go/model/flow" ) // Vote is the HotStuff algorithm's concept of a vote for a block proposal. +// +//structwrite:immutable - mutations allowed only within the constructor type Vote struct { View uint64 BlockID flow.Identifier @@ -14,18 +16,43 @@ type Vote struct { SigData []byte } +// UntrustedVote is an untrusted input-only representation of an Vote, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedVote should be validated and converted into +// a trusted Vote using NewVote constructor. +type UntrustedVote Vote + +// NewVote creates a new instance of Vote. +// Construction Vote allowed only within the constructor +// +// All errors indicate a valid Vote cannot be constructed from the input. +func NewVote(untrusted UntrustedVote) (*Vote, error) { + if untrusted.BlockID == flow.ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if untrusted.SignerID == flow.ZeroID { + return nil, fmt.Errorf("SignerID must not be empty") + } + + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("SigData must not be empty") + } + + return &Vote{ + View: untrusted.View, + BlockID: untrusted.BlockID, + SignerID: untrusted.SignerID, + SigData: untrusted.SigData, + }, nil +} + // ID returns the identifier for the vote. func (uv *Vote) ID() flow.Identifier { return flow.MakeID(uv) } - -// VoteFromFlow turns the vote parameters into a vote struct. -func VoteFromFlow(signerID flow.Identifier, blockID flow.Identifier, view uint64, sig crypto.Signature) *Vote { - vote := Vote{ - View: view, - BlockID: blockID, - SignerID: signerID, - SigData: sig, - } - return &vote -} diff --git a/consensus/hotstuff/notifications/log_consumer.go b/consensus/hotstuff/notifications/log_consumer.go index 3e454b61272..7e01bc8e408 100644 --- a/consensus/hotstuff/notifications/log_consumer.go +++ b/consensus/hotstuff/notifications/log_consumer.go @@ -268,10 +268,10 @@ func (lc *LogConsumer) OnNewTcDiscovered(tc *flow.TimeoutCertificate) { Msg("new TC discovered") } -func (lc *LogConsumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { +func (lc *LogConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { lc.log.Debug(). - Hex("block_id", blockID[:]). - Uint64("block_view", view). + Hex("block_id", vote.BlockID[:]). + Uint64("block_view", vote.View). Hex("recipient_id", recipientID[:]). 
Msg("publishing HotStuff vote") } diff --git a/consensus/hotstuff/notifications/noop_consumer.go b/consensus/hotstuff/notifications/noop_consumer.go index 38433c72b1a..7710a2c74a3 100644 --- a/consensus/hotstuff/notifications/noop_consumer.go +++ b/consensus/hotstuff/notifications/noop_consumer.go @@ -85,7 +85,7 @@ type NoopCommunicatorConsumer struct{} var _ hotstuff.CommunicatorConsumer = (*NoopCommunicatorConsumer)(nil) -func (*NoopCommunicatorConsumer) OnOwnVote(flow.Identifier, uint64, []byte, flow.Identifier) {} +func (*NoopCommunicatorConsumer) OnOwnVote(*model.Vote, flow.Identifier) {} func (*NoopCommunicatorConsumer) OnOwnTimeout(*model.TimeoutObject) {} diff --git a/consensus/hotstuff/notifications/pubsub/communicator_distributor.go b/consensus/hotstuff/notifications/pubsub/communicator_distributor.go index 5e0604fa83c..065f6820ee9 100644 --- a/consensus/hotstuff/notifications/pubsub/communicator_distributor.go +++ b/consensus/hotstuff/notifications/pubsub/communicator_distributor.go @@ -31,11 +31,11 @@ func (d *CommunicatorDistributor) AddCommunicatorConsumer(consumer hotstuff.Comm d.consumers = append(d.consumers, consumer) } -func (d *CommunicatorDistributor) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { +func (d *CommunicatorDistributor) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { d.lock.RLock() defer d.lock.RUnlock() for _, s := range d.consumers { - s.OnOwnVote(blockID, view, sigData, recipientID) + s.OnOwnVote(vote, recipientID) } } diff --git a/consensus/hotstuff/notifications/telemetry.go b/consensus/hotstuff/notifications/telemetry.go index a636bac3789..8ff127aaf26 100644 --- a/consensus/hotstuff/notifications/telemetry.go +++ b/consensus/hotstuff/notifications/telemetry.go @@ -169,10 +169,10 @@ func (t *TelemetryConsumer) OnTcTriggeredViewChange(oldView uint64, newView uint Msg("OnTcTriggeredViewChange") } -func (t *TelemetryConsumer) OnOwnVote(blockID flow.Identifier, view uint64, _ []byte, recipientID flow.Identifier) { +func (t *TelemetryConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { t.pathHandler.NextStep(). - Uint64("voted_block_view", view). - Hex("voted_block_id", logging.ID(blockID)). + Uint64("voted_block_view", vote.View). + Hex("voted_block_id", logging.ID(vote.BlockID)). Hex("recipient_id", logging.ID(recipientID)). 
Msg("OnOwnVote") } diff --git a/consensus/hotstuff/safetyrules/safety_rules.go b/consensus/hotstuff/safetyrules/safety_rules.go index aa7067ce1bf..e231b47925a 100644 --- a/consensus/hotstuff/safetyrules/safety_rules.go +++ b/consensus/hotstuff/safetyrules/safety_rules.go @@ -181,12 +181,23 @@ func (r *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCertif lastTimeout := r.safetyData.LastTimeout if lastTimeout != nil && lastTimeout.View == curView { // model.TimeoutObject are conceptually immutable, hence we create a shallow copy here, which allows us to increment TimeoutTick - updatedTimeout := *lastTimeout - updatedTimeout.TimeoutTick += 1 + updatedTimeout, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: lastTimeout.View, + NewestQC: lastTimeout.NewestQC, + LastViewTC: lastTimeout.LastViewTC, + SignerID: lastTimeout.SignerID, + SigData: lastTimeout.SigData, + TimeoutTick: lastTimeout.TimeoutTick + 1, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout object: %w", err) + } // persist updated TimeoutObject in `safetyData` and return it - r.safetyData.LastTimeout = &updatedTimeout - err := r.persist.PutSafetyData(r.safetyData) + r.safetyData.LastTimeout = updatedTimeout + err = r.persist.PutSafetyData(r.safetyData) if err != nil { return nil, fmt.Errorf("could not persist safety data: %w", err) } diff --git a/consensus/hotstuff/safetyrules/safety_rules_test.go b/consensus/hotstuff/safetyrules/safety_rules_test.go index c4a928ffba1..bad79d1b52f 100644 --- a/consensus/hotstuff/safetyrules/safety_rules_test.go +++ b/consensus/hotstuff/safetyrules/safety_rules_test.go @@ -520,6 +520,9 @@ func (s *SafetyRulesTestSuite) TestProduceTimeout_ShouldTimeout() { expectedTimeout := &model.TimeoutObject{ View: view, NewestQC: newestQC, + // don't care about actual data + SignerID: unittest.IdentifierFixture(), + SigData: unittest.SignatureFixture(), } expectedSafetyData := &hotstuff.SafetyData{ diff --git a/consensus/hotstuff/timeoutcollector/aggregation_test.go b/consensus/hotstuff/timeoutcollector/aggregation_test.go index c0beaf473fa..93eb0774d0a 100644 --- a/consensus/hotstuff/timeoutcollector/aggregation_test.go +++ b/consensus/hotstuff/timeoutcollector/aggregation_test.go @@ -40,7 +40,7 @@ func createAggregationData(t *testing.T, signersNumber int) ( pks := make([]crypto.PublicKey, 0, signersNumber) view := 10 + uint64(rand.Uint32()) for i := 0; i < signersNumber; i++ { - sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381) identity := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // id ids = append(ids, &identity.IdentitySkeleton) @@ -70,7 +70,7 @@ func createAggregationData(t *testing.T, signersNumber int) ( func TestNewTimeoutSignatureAggregator(t *testing.T) { tag := "random_tag" - sk := unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + sk := unittest.PrivateKeyFixture(crypto.ECDSAP256) signer := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // wrong key type _, err := NewTimeoutSignatureAggregator(0, flow.IdentitySkeletonList{&signer.IdentitySkeleton}, tag) @@ -191,7 +191,7 @@ func TestTimeoutSignatureAggregator_Aggregate(t *testing.T) { var err error aggregator, ids, pks, sigs, signersInfo, msgs, hashers := createAggregationData(t, signersNum) // replace sig with random one - sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + sk := 
unittest.PrivateKeyFixture(crypto.BLSBLS12381) sigs[0], err = sk.Sign([]byte("dummy"), hashers[0]) require.NoError(t, err) diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor.go b/consensus/hotstuff/timeoutcollector/timeout_processor.go index 5c959ea8b8f..d266558d421 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor.go @@ -290,13 +290,20 @@ func (p *TimeoutProcessor) buildTC() (*flow.TimeoutCertificate, error) { // than the data stored in `sigAggregator`. newestQC := p.newestQCTracker.NewestQC() - return &flow.TimeoutCertificate{ - View: p.view, - NewestQCViews: newestQCViews, - NewestQC: newestQC, - SignerIndices: signerIndices, - SigData: aggregatedSig, - }, nil + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate{ + View: p.view, + NewestQCViews: newestQCViews, + NewestQC: newestQC, + SignerIndices: signerIndices, + SigData: aggregatedSig, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) + } + + return tc, nil } // signerIndicesFromIdentities encodes identities into signer indices. diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go index 309d1351b98..8178ff35b5b 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go @@ -412,7 +412,7 @@ func (s *TimeoutProcessorTestSuite) TestProcess_ConcurrentCreatingTC() { }) } // don't care about actual data - s.sigAggregator.On("Aggregate").Return(signersData, crypto.Signature{}, nil) + s.sigAggregator.On("Aggregate").Return(signersData, unittest.SignatureFixture(), nil) var startupWg, shutdownWg sync.WaitGroup diff --git a/consensus/hotstuff/validator/validator.go b/consensus/hotstuff/validator/validator.go index 597f0b5360f..1e4f9c53ac1 100644 --- a/consensus/hotstuff/validator/validator.go +++ b/consensus/hotstuff/validator/validator.go @@ -206,7 +206,11 @@ func (v *Validator) ValidateProposal(proposal *model.SignedProposal) error { block := proposal.Block // validate the proposer's vote and get his identity - _, err := v.ValidateVote(proposal.ProposerVote()) + vote, err := proposal.ProposerVote() + if err != nil { + return fmt.Errorf("could not get vote from proposer vote: %w", err) + } + _, err = v.ValidateVote(vote) if model.IsInvalidVoteError(err) { return model.NewInvalidProposalErrorf(proposal, "invalid proposer signature: %w", err) } diff --git a/consensus/hotstuff/validator/validator_test.go b/consensus/hotstuff/validator/validator_test.go index 6c7e91ad0fa..725aedac470 100644 --- a/consensus/hotstuff/validator/validator_test.go +++ b/consensus/hotstuff/validator/validator_test.go @@ -71,7 +71,9 @@ func (ps *ProposalSuite) SetupTest() { ps.voters = ps.participants.Filter(filter.HasNodeID[flow.Identity](voterIDs...)).ToSkeleton() ps.proposal = helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(ps.block)))) - ps.vote = ps.proposal.ProposerVote() + vote, err := ps.proposal.ProposerVote() + require.NoError(ps.T(), err) + ps.vote = vote ps.voter = ps.leader // set up the mocked hotstuff Replicas state diff --git a/consensus/hotstuff/verification/combined_signer_v2.go b/consensus/hotstuff/verification/combined_signer_v2.go index afed7073f94..0879d9ee722 100644 --- a/consensus/hotstuff/verification/combined_signer_v2.go +++ 
b/consensus/hotstuff/verification/combined_signer_v2.go @@ -61,12 +61,14 @@ func (c *CombinedSigner) CreateVote(block *model.Block) (*model.Vote, error) { return nil, fmt.Errorf("could not create signature: %w", err) } - // create the vote - vote := &model.Vote{ + vote, err := model.NewVote(model.UntrustedVote{ View: block.View, BlockID: block.BlockID, SignerID: c.staking.NodeID(), SigData: sigData, + }) + if err != nil { + return nil, fmt.Errorf("could not create vote: %w", err) } return vote, nil @@ -82,13 +84,20 @@ func (c *CombinedSigner) CreateTimeout(curView uint64, newestQC *flow.QuorumCert return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err) } - timeout := &model.TimeoutObject{ - View: curView, - NewestQC: newestQC, - LastViewTC: lastViewTC, - SignerID: c.staking.NodeID(), - SigData: sigData, + timeout, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: curView, + NewestQC: newestQC, + LastViewTC: lastViewTC, + SignerID: c.staking.NodeID(), + SigData: sigData, + TimeoutTick: 0, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout object: %w", err) } + return timeout, nil } diff --git a/consensus/hotstuff/verification/combined_signer_v3.go b/consensus/hotstuff/verification/combined_signer_v3.go index a496af91387..8bca7934db9 100644 --- a/consensus/hotstuff/verification/combined_signer_v3.go +++ b/consensus/hotstuff/verification/combined_signer_v3.go @@ -62,12 +62,14 @@ func (c *CombinedSignerV3) CreateVote(block *model.Block) (*model.Vote, error) { return nil, fmt.Errorf("could not create signature: %w", err) } - // create the vote - vote := &model.Vote{ + vote, err := model.NewVote(model.UntrustedVote{ View: block.View, BlockID: block.BlockID, SignerID: c.staking.NodeID(), SigData: sigData, + }) + if err != nil { + return nil, fmt.Errorf("could not create vote: %w", err) } return vote, nil @@ -83,13 +85,20 @@ func (c *CombinedSignerV3) CreateTimeout(curView uint64, newestQC *flow.QuorumCe return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err) } - timeout := &model.TimeoutObject{ - View: curView, - NewestQC: newestQC, - LastViewTC: lastViewTC, - SignerID: c.staking.NodeID(), - SigData: sigData, + timeout, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: curView, + NewestQC: newestQC, + LastViewTC: lastViewTC, + SignerID: c.staking.NodeID(), + SigData: sigData, + TimeoutTick: 0, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout object: %w", err) } + return timeout, nil } diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go index 9317268ce59..c98e9a70581 100644 --- a/consensus/hotstuff/verification/combined_signer_v3_test.go +++ b/consensus/hotstuff/verification/combined_signer_v3_test.go @@ -338,7 +338,7 @@ func generateAggregatedSignature(t *testing.T, n int, msg []byte, tag string) ([ // generateSignature creates a single private BLS 12-381 key, signs the provided `message` with // using domain separation `tag` and return the private key and signature. 
func generateSignature(t *testing.T, message []byte, tag string) (crypto.PrivateKey, crypto.Signature) { - priv := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + priv := unittest.PrivateKeyFixture(crypto.BLSBLS12381) sig, err := priv.Sign(message, msig.NewBLSHasher(tag)) require.NoError(t, err) return priv, sig diff --git a/consensus/hotstuff/verification/staking_signer.go b/consensus/hotstuff/verification/staking_signer.go index cc1b9ca1291..8e4c9bfcfd5 100644 --- a/consensus/hotstuff/verification/staking_signer.go +++ b/consensus/hotstuff/verification/staking_signer.go @@ -49,12 +49,14 @@ func (c *StakingSigner) CreateVote(block *model.Block) (*model.Vote, error) { return nil, fmt.Errorf("could not create signature: %w", err) } - // create the vote - vote := &model.Vote{ + vote, err := model.NewVote(model.UntrustedVote{ View: block.View, BlockID: block.BlockID, SignerID: c.signerID, SigData: sigData, + }) + if err != nil { + return nil, fmt.Errorf("could not create vote: %w", err) } return vote, nil @@ -69,13 +71,20 @@ func (c *StakingSigner) CreateTimeout(curView uint64, newestQC *flow.QuorumCerti return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err) } - timeout := &model.TimeoutObject{ - View: curView, - NewestQC: newestQC, - LastViewTC: lastViewTC, - SignerID: c.signerID, - SigData: sigData, + timeout, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: curView, + NewestQC: newestQC, + LastViewTC: lastViewTC, + SignerID: c.signerID, + SigData: sigData, + TimeoutTick: 0, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout object: %w", err) } + return timeout, nil } diff --git a/consensus/hotstuff/vote_test.go b/consensus/hotstuff/vote_test.go new file mode 100644 index 00000000000..6afedf98038 --- /dev/null +++ b/consensus/hotstuff/vote_test.go @@ -0,0 +1,85 @@ +package hotstuff + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewVote verifies that NewVote correctly constructs a Vote from valid input +// and returns an error when any required field is missing. 
+// It covers: +// - valid vote creation +// - missing BlockID +// - missing SignerID +// - missing SigData +func TestNewVote(t *testing.T) { + const validView = uint64(1) + + t.Run("valid vote", func(t *testing.T) { + blockID := unittest.IdentifierFixture() + signerID := unittest.IdentifierFixture() + sigData := []byte{0, 1, 2} + + uv := model.UntrustedVote{ + View: validView, + BlockID: blockID, + SignerID: signerID, + SigData: sigData, + } + + v, err := model.NewVote(uv) + assert.NoError(t, err) + assert.NotNil(t, v) + assert.Equal(t, validView, v.View) + assert.Equal(t, blockID, v.BlockID) + assert.Equal(t, signerID, v.SignerID) + assert.Equal(t, sigData, v.SigData) + }) + + t.Run("empty BlockID", func(t *testing.T) { + uv := model.UntrustedVote{ + View: validView, + BlockID: flow.ZeroID, + SignerID: unittest.IdentifierFixture(), + SigData: []byte{0, 1, 2}, + } + + v, err := model.NewVote(uv) + assert.Error(t, err) + assert.Nil(t, v) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("empty SignerID", func(t *testing.T) { + uv := model.UntrustedVote{ + View: validView, + BlockID: unittest.IdentifierFixture(), + SignerID: flow.ZeroID, + SigData: []byte{0, 1, 2}, + } + + v, err := model.NewVote(uv) + assert.Error(t, err) + assert.Nil(t, v) + assert.Contains(t, err.Error(), "SignerID") + }) + + t.Run("empty SigData", func(t *testing.T) { + uv := model.UntrustedVote{ + View: validView, + BlockID: unittest.IdentifierFixture(), + SignerID: unittest.IdentifierFixture(), + SigData: nil, + } + + v, err := model.NewVote(uv) + assert.Error(t, err) + assert.Nil(t, v) + assert.Contains(t, err.Error(), "SigData") + }) +} diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go index 4ae450450ab..c1b4edececa 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go @@ -308,10 +308,15 @@ func buildQCWithPackerAndSigData( return nil, fmt.Errorf("could not pack the block sig data: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: block.View, BlockID: block.BlockID, SignerIndices: signerIndices, SigData: sigData, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go index c444ed1ac75..e47234421be 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go @@ -324,10 +324,15 @@ func (p *CombinedVoteProcessorV3) buildQC() (*flow.QuorumCertificate, error) { return nil, fmt.Errorf("could not pack the block sig data: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: p.block.View, BlockID: p.block.BlockID, SignerIndices: signerIndices, SigData: sigData, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } diff --git a/consensus/hotstuff/votecollector/factory.go b/consensus/hotstuff/votecollector/factory.go index b444bc35ca7..29a5ef00abe 100644 --- a/consensus/hotstuff/votecollector/factory.go +++ b/consensus/hotstuff/votecollector/factory.go @@ -46,7 +46,12 @@ func (f *VoteProcessorFactory) Create(log zerolog.Logger, 
proposal *model.Signed return nil, fmt.Errorf("instantiating vote processor for block %v failed: %w", proposal.Block.BlockID, err) } - err = processor.Process(proposal.ProposerVote()) + vote, err := proposal.ProposerVote() + if err != nil { + return nil, fmt.Errorf("could not get vote from proposer vote: %w", err) + } + + err = processor.Process(vote) if err != nil { if model.IsInvalidVoteError(err) { return nil, model.NewInvalidProposalErrorf(proposal, "invalid proposer vote: %w", err) diff --git a/consensus/hotstuff/votecollector/factory_test.go b/consensus/hotstuff/votecollector/factory_test.go index 40207150e86..7102967e956 100644 --- a/consensus/hotstuff/votecollector/factory_test.go +++ b/consensus/hotstuff/votecollector/factory_test.go @@ -21,7 +21,9 @@ func TestVoteProcessorFactory_CreateWithValidProposal(t *testing.T) { proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} - mockedProcessor.On("Process", proposal.ProposerVote()).Return(nil).Once() + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(nil).Once() mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once() voteProcessorFactory := &VoteProcessorFactory{ @@ -46,7 +48,9 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { t.Run("invalid-vote", func(t *testing.T) { proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} - mockedProcessor.On("Process", proposal.ProposerVote()).Return(model.NewInvalidVoteErrorf(proposal.ProposerVote(), "")).Once() + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(model.NewInvalidVoteErrorf(vote, "")).Once() mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once() voteProcessorFactory := &VoteProcessorFactory{ @@ -66,7 +70,9 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) { proposal := helper.MakeSignedProposal() mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{} exception := errors.New("process-exception") - mockedProcessor.On("Process", proposal.ProposerVote()).Return(exception).Once() + vote, err := proposal.ProposerVote() + require.NoError(t, err) + mockedProcessor.On("Process", vote).Return(exception).Once() mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once() diff --git a/consensus/hotstuff/votecollector/staking_vote_processor.go b/consensus/hotstuff/votecollector/staking_vote_processor.go index 72e4f21e6e6..cd9814a6d96 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor.go @@ -179,12 +179,17 @@ func (p *StakingVoteProcessor) buildQC() (*flow.QuorumCertificate, error) { return nil, fmt.Errorf("could not encode signer indices: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: p.block.View, BlockID: p.block.BlockID, SignerIndices: signerIndices, SigData: aggregatedStakingSig, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } func (p *StakingVoteProcessor) signerIndicesFromIdentities(signerIDs flow.IdentifierList) ([]byte, error) { diff --git a/consensus/integration/epoch_test.go b/consensus/integration/epoch_test.go index dcdc416b0bc..17260179ffa 100644 --- a/consensus/integration/epoch_test.go +++ 
b/consensus/integration/epoch_test.go @@ -98,7 +98,7 @@ func TestStaticEpochTransition(t *testing.T) { func TestEpochTransition_IdentitiesOverlap(t *testing.T) { // must finalize 8 blocks, we specify the epoch transition after 4 views stopper := NewStopper(8, 0) - privateNodeInfos := createPrivateNodeIdentities(4) + privateNodeInfos := createPrivateNodeIdentities(t, 4) firstEpochConsensusParticipants := completeConsensusIdentities(t, privateNodeInfos[:3]) rootSnapshot := createRootSnapshot(t, firstEpochConsensusParticipants) consensusParticipants := NewConsensusParticipants(firstEpochConsensusParticipants) @@ -257,11 +257,16 @@ func withNextEpoch( // Construct the new epoch protocol state entry epochStateEntry, err := flow.NewEpochStateEntry( - minEpochStateEntry, - rootProtocolState.EpochEntry.PreviousEpochSetup, - rootProtocolState.EpochEntry.PreviousEpochCommit, - currEpochSetup, currEpochCommit, - nextEpochSetup, nextEpochCommit) + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minEpochStateEntry, + PreviousEpochSetup: rootProtocolState.EpochEntry.PreviousEpochSetup, + PreviousEpochCommit: rootProtocolState.EpochEntry.PreviousEpochCommit, + CurrentEpochSetup: currEpochSetup, + CurrentEpochCommit: currEpochCommit, + NextEpochSetup: nextEpochSetup, + NextEpochCommit: nextEpochCommit, + }, + ) require.NoError(t, err) // Re-construct epoch protocol state with modified events (constructs ActiveIdentity fields) epochRichProtocolState, err := flow.NewRichEpochStateEntry(epochStateEntry) diff --git a/consensus/integration/nodes_test.go b/consensus/integration/nodes_test.go index e164de7a874..80eb5b21bd8 100644 --- a/consensus/integration/nodes_test.go +++ b/consensus/integration/nodes_test.go @@ -3,12 +3,12 @@ package integration_test import ( "context" "fmt" + "io" "os" "sort" "testing" "time" - "github.com/dgraph-io/badger/v2" "github.com/gammazero/workerpool" "github.com/onflow/crypto" "github.com/rs/zerolog" @@ -58,7 +58,7 @@ import ( "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" + fstorage "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" @@ -138,7 +138,8 @@ func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.Participan } type Node struct { - db *badger.DB + db fstorage.DB + dbCloser io.Closer dbDir string index int log zerolog.Logger @@ -151,7 +152,7 @@ type Node struct { timeoutAggregator hotstuff.TimeoutAggregator messageHub *message_hub.MessageHub state *bprotocol.ParticipantState - headers *storage.Headers + headers fstorage.Headers net *Network } @@ -285,8 +286,9 @@ func createRootBlockData(t *testing.T, participantData *run.ParticipantData) (*f commit.DKGIndexMap = dkgIndexMap }, ) - - epochProtocolStateID := inmem.EpochProtocolStateFromServiceEvents(setup, commit).ID() + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + require.NoError(t, err) + epochProtocolStateID := minEpochStateEntry.ID() safetyParams, err := protocol.DefaultEpochSafetyParams(root.Header.ChainID) require.NoError(t, err) rootProtocolState, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochProtocolStateID) @@ -300,13 +302,13 @@ func createRootBlockData(t 
*testing.T, participantData *run.ParticipantData) (*f return root, result, seal } -func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo { +func createPrivateNodeIdentities(t *testing.T, n int) []bootstrap.NodeInfo { consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) infos := make([]bootstrap.NodeInfo, 0, n) for _, node := range consensus { networkPrivKey := unittest.NetworkingPrivKeyFixture() stakingPrivKey := unittest.StakingPrivKeyFixture() - nodeInfo := bootstrap.NewPrivateNodeInfo( + nodeInfo, err := bootstrap.NewPrivateNodeInfo( node.NodeID, node.Role, node.Address, @@ -314,6 +316,7 @@ func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo { networkPrivKey, stakingPrivKey, ) + require.NoError(t, err) infos = append(infos, nodeInfo) } return infos @@ -321,7 +324,7 @@ func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo { func createConsensusIdentities(t *testing.T, n int) *run.ParticipantData { // create n consensus node participants - consensus := createPrivateNodeIdentities(n) + consensus := createPrivateNodeIdentities(t, n) return completeConsensusIdentities(t, consensus) } @@ -372,26 +375,28 @@ func createNode( epochLookup module.EpochLookup, ) *Node { - db, dbDir := unittest.TempBadgerDB(t) + badgerdb, dbDir := unittest.TempBadgerDB(t) metricsCollector := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - - headersDB := storage.NewHeaders(metricsCollector, db) - guaranteesDB := storage.NewGuarantees(metricsCollector, db, storage.DefaultCacheSize) - sealsDB := storage.NewSeals(metricsCollector, db) - indexDB := storage.NewIndex(metricsCollector, db) - resultsDB := storage.NewExecutionResults(metricsCollector, db) - receiptsDB := storage.NewExecutionReceipts(metricsCollector, db, resultsDB, storage.DefaultCacheSize) - payloadsDB := storage.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB) - blocksDB := storage.NewBlocks(db, headersDB, payloadsDB) - qcsDB := storage.NewQuorumCertificates(metricsCollector, db, storage.DefaultCacheSize) - setupsDB := storage.NewEpochSetups(metricsCollector, db) - commitsDB := storage.NewEpochCommits(metricsCollector, db) - protocolStateDB := storage.NewEpochProtocolStateEntries(metricsCollector, setupsDB, commitsDB, db, - storage.DefaultEpochProtocolStateCacheSize, storage.DefaultProtocolStateIndexCacheSize) - protocokKVStoreDB := storage.NewProtocolKVStore(metricsCollector, db, - storage.DefaultProtocolKVStoreCacheSize, storage.DefaultProtocolKVStoreByBlockIDCacheSize) - versionBeaconDB := store.NewVersionBeacons(badgerimpl.ToDB(db)) + db := badgerimpl.ToDB(badgerdb) + lockManager := fstorage.NewTestingLockManager() + + headersDB := store.NewHeaders(metricsCollector, db) + guaranteesDB := store.NewGuarantees(metricsCollector, db, store.DefaultCacheSize) + sealsDB := store.NewSeals(metricsCollector, db) + indexDB := store.NewIndex(metricsCollector, db) + resultsDB := store.NewExecutionResults(metricsCollector, db) + receiptsDB := store.NewExecutionReceipts(metricsCollector, db, resultsDB, store.DefaultCacheSize) + payloadsDB := store.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB) + blocksDB := store.NewBlocks(db, headersDB, payloadsDB) + qcsDB := store.NewQuorumCertificates(metricsCollector, db, store.DefaultCacheSize) + setupsDB := store.NewEpochSetups(metricsCollector, db) + commitsDB := store.NewEpochCommits(metricsCollector, db) + protocolStateDB := store.NewEpochProtocolStateEntries(metricsCollector, 
setupsDB, commitsDB, db, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocokKVStoreDB := store.NewProtocolKVStore(metricsCollector, db, + store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeaconDB := store.NewVersionBeacons(db) protocolStateEvents := events.NewDistributor() localID := identity.ID() @@ -404,6 +409,7 @@ func createNode( state, err := bprotocol.Bootstrap( metricsCollector, db, + lockManager, headersDB, sealsDB, resultsDB, @@ -517,7 +523,7 @@ func createNode( protocolStateEvents.AddConsumer(committee) // initialize the block finalizer - final := finalizer.NewFinalizer(db, headersDB, fullState, trace.NewNoopTracer()) + final := finalizer.NewFinalizer(db.Reader(), headersDB, fullState, trace.NewNoopTracer()) syncCore, err := synccore.New(log, synccore.DefaultConfig(), metricsCollector, rootHeader.ChainID) require.NoError(t, err) @@ -552,7 +558,7 @@ func createNode( signer := verification.NewCombinedSigner(me, beaconKeyStore) - persist, err := persister.New(badgerimpl.ToDB(db), rootHeader.ChainID) + persist, err := persister.New(db, rootHeader.ChainID) require.NoError(t, err) livenessData, err := persist.GetLivenessData() @@ -706,6 +712,7 @@ func createNode( hotstuffDistributor.AddConsumer(messageHub) + node.dbCloser = badgerdb node.compliance = comp node.sync = sync node.state = fullState @@ -723,7 +730,7 @@ func createNode( func cleanupNodes(nodes []*Node) { for _, n := range nodes { - _ = n.db.Close() + _ = n.dbCloser.Close() _ = os.RemoveAll(n.dbDir) } } diff --git a/consensus/recovery/protocol/state_test.go b/consensus/recovery/protocol/state_test.go index a0c266f9548..4b4c968c867 100644 --- a/consensus/recovery/protocol/state_test.go +++ b/consensus/recovery/protocol/state_test.go @@ -12,7 +12,8 @@ import ( "github.com/onflow/flow-go/module/metrics" protocol "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/util" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -44,7 +45,7 @@ func TestSaveBlockAsReplica(t *testing.T) { require.NoError(t, err) metrics := metrics.NewNoopCollector() - headers := bstorage.NewHeaders(metrics, db) + headers := store.NewHeaders(metrics, badgerimpl.ToDB(db)) finalized, pending, err := recovery.FindLatest(state, headers) require.NoError(t, err) require.Equal(t, b0.ID(), finalized.ID(), "recover find latest returns inconsistent finalized block") diff --git a/deploy/systemd-docker/example-node-infos.pub.json b/deploy/systemd-docker/example-node-infos.pub.json index cfba2da97fc..a60dcf48625 100644 --- a/deploy/systemd-docker/example-node-infos.pub.json +++ b/deploy/systemd-docker/example-node-infos.pub.json @@ -5,7 +5,8 @@ "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556", "Weight": 1000, "NetworkPubKey": "...", - "StakingPubKey": "..." + "StakingPubKey": "...", + "StakingKeyPoP": "..." }, { "Role": "consensus", @@ -13,7 +14,8 @@ "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556", "Weight": 1000, "NetworkPubKey": "...", - "StakingPubKey": "..." + "StakingPubKey": "...", + "StakingKeyPoP": "..." }, { "Role": "execution", @@ -21,7 +23,8 @@ "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556", "Weight": 1000, "NetworkPubKey": "...", - "StakingPubKey": "..." 
+ "StakingPubKey": "...", + "StakingKeyPoP": "..." }, { "Role": "verification", @@ -29,6 +32,7 @@ "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556", "Weight": 1000, "NetworkPubKey": "...", - "StakingPubKey": "..." + "StakingPubKey": "...", + "StakingKeyPoP": "..." } ] diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 4c2ed19be48..6c6439e8310 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -8,6 +8,7 @@ import ( "github.com/dgraph-io/badger/v2" "github.com/google/go-cmp/cmp" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -24,6 +25,9 @@ import ( accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" "github.com/onflow/flow-go/engine/access/subscription" commonrpc "github.com/onflow/flow-go/engine/common/rpc" @@ -44,7 +48,7 @@ import ( protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/storage/util" @@ -80,6 +84,7 @@ type Suite struct { backend *backend.Backend sporkID flow.Identifier protocolStateVersion uint64 + lockManager lockctx.Manager } // TestAccess tests scenarios which exercise multiple API calls using both the RPC handler and the ingest engine @@ -89,6 +94,7 @@ func TestAccess(t *testing.T) { } func (suite *Suite) SetupTest() { + suite.lockManager = storage.NewTestingLockManager() suite.log = zerolog.New(os.Stderr) suite.net = new(mocknetwork.Network) suite.state = new(protocol.State) @@ -169,10 +175,13 @@ func (suite *Suite) RunTest( ExecutionReceipts: en.Receipts, ChainID: suite.chainID, AccessMetrics: suite.metrics, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, Log: suite.log, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, }) require.NoError(suite.T(), err) @@ -290,8 +299,9 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { // create storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) + storedb := badgerimpl.ToDB(db) + transactions := store.NewTransactions(metrics, storedb) + collections := store.NewCollections(storedb, transactions) // create collection node cluster count := 2 @@ -337,10 +347,13 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { ChainID: suite.chainID, AccessMetrics: metrics, ConnFactory: connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: 
events.DefaultMaxHeightRange, Log: suite.log, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, }) require.NoError(suite.T(), err) @@ -389,12 +402,27 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { block2 := unittest.BlockFixture() block2.Header.Height = 2 - require.NoError(suite.T(), all.Blocks.Store(&block1)) - require.NoError(suite.T(), all.Blocks.Store(&block2)) + bdb := badgerimpl.ToDB(db) + lctx := suite.lockManager.NewContext() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + if err := all.Blocks.BatchStore(lctx, rw, &block1); err != nil { + return err + } + if err := all.Blocks.BatchStore(lctx, rw, &block2); err != nil { + return err + } + return nil + })) + lctx.Release() + fctx := suite.lockManager.NewContext() + require.NoError(suite.T(), fctx.AcquireLock(storage.LockFinalizeBlock)) // the follower logic should update height index on the block storage when a block is finalized - err := db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) - require.NoError(suite.T(), err) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, block2.Header.Height, block2.ID()) + })) + fctx.Release() assertHeaderResp := func( resp *accessproto.BlockHeaderResponse, @@ -635,8 +663,9 @@ func (suite *Suite) TestGetSealedTransaction() { // initialize storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) + storedb := badgerimpl.ToDB(db) + transactions := store.NewTransactions(metrics, storedb) + collections := store.NewCollections(storedb, transactions) collectionsToMarkFinalized, err := stdmap.NewTimes(100) require.NoError(suite.T(), err) collectionsToMarkExecuted, err := stdmap.NewTimes(100) @@ -666,11 +695,13 @@ func (suite *Suite) TestGetSealedTransaction() { ChainID: suite.chainID, AccessMetrics: suite.metrics, ConnFactory: connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, Log: suite.log, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, + Communicator: node_communicator.NewNodeCommunicator(false), + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, }) require.NoError(suite.T(), err) @@ -697,31 +728,48 @@ func (suite *Suite) TestGetSealedTransaction() { // create the ingest engine processedHeight := store.NewConsumerProgress(badgerimpl.ToDB(db), module.ConsumeProgressIngestionEngineBlockHeight) + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, + all.Blocks, + collections, + transactions, + lastFullBlockHeight, + suite.lockManager, + ) + ingestEng, err 
:= ingestion.New( suite.log, suite.net, suite.state, suite.me, - suite.request, all.Blocks, - all.Headers, - collections, - transactions, en.Results, en.Receipts, - collectionExecutedMetric, processedHeight, - lastFullBlockHeight, + collectionSyncer, + collectionExecutedMetric, nil, ) require.NoError(suite.T(), err) // 1. Assume that follower engine updated the block storage and the protocol state. The block is reported as sealed - err = all.Blocks.Store(block) - require.NoError(suite.T(), err) - - err = db.Update(operation.IndexBlockHeight(block.Header.Height, block.ID())) - require.NoError(suite.T(), err) + bdb := badgerimpl.ToDB(db) + lctx := suite.lockManager.NewContext() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, block) + })) + lctx.Release() + + fctx := suite.lockManager.NewContext() + require.NoError(suite.T(), fctx.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, block.Header.Height, block.ID()) + })) + fctx.Release() suite.sealedBlock = block.Header @@ -741,8 +789,14 @@ func (suite *Suite) TestGetSealedTransaction() { // 3. Request engine is used to request missing collection suite.request.On("EntityByID", collection.ID(), mock.Anything).Return() - // 4. Indexer HandleCollection receives the requested collection and all the execution receipts - err = indexer.HandleCollection(collection, collections, transactions, suite.log, collectionExecutedMetric) + // 4. Indexer IndexCollection receives the requested collection and all the execution receipts + // Create a lock context for indexing + indexLctx := suite.lockManager.NewContext() + lockErr := indexLctx.AcquireLock(storage.LockInsertCollection) + require.NoError(suite.T(), lockErr) + defer indexLctx.Release() + + err = indexer.IndexCollection(indexLctx, collection, collections, suite.log, module.CollectionExecutedMetric(collectionExecutedMetric)) require.NoError(suite.T(), err) for _, r := range executionReceipts { @@ -789,10 +843,20 @@ func (suite *Suite) TestGetTransactionResult() { // specifically for this test we will consider that sealed block is far behind finalized, so we get EXECUTED status suite.sealedSnapshot.On("Head").Return(sealedBlock, nil) - err := all.Blocks.Store(block) - require.NoError(suite.T(), err) - err = all.Blocks.Store(blockNegative) - require.NoError(suite.T(), err) + bdb := badgerimpl.ToDB(db) + lctx := suite.lockManager.NewContext() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, block) + })) + lctx.Release() + + lctx2 := suite.lockManager.NewContext() + require.NoError(suite.T(), lctx2.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx2, rw, blockNegative) + })) + lctx2.Release() suite.state.On("AtBlockID", blockId).Return(suite.sealedSnapshot) @@ -822,9 +886,10 @@ func (suite *Suite) TestGetTransactionResult() { // initialize storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - err = 
collections.Store(collectionNegative) + storedb := badgerimpl.ToDB(db) + transactions := store.NewTransactions(metrics, storedb) + collections := store.NewCollections(storedb, transactions) + _, err := collections.Store(collectionNegative) require.NoError(suite.T(), err) collectionsToMarkFinalized, err := stdmap.NewTimes(100) require.NoError(suite.T(), err) @@ -854,11 +919,13 @@ func (suite *Suite) TestGetTransactionResult() { ChainID: suite.chainID, AccessMetrics: suite.metrics, ConnFactory: connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, Log: suite.log, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, + Communicator: node_communicator.NewNodeCommunicator(false), + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, }) require.NoError(suite.T(), err) @@ -886,22 +953,29 @@ func (suite *Suite) TestGetTransactionResult() { lastFullBlockHeight, err := counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeightProgress) require.NoError(suite.T(), err) - // create the ingest engine + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, + all.Blocks, + collections, + transactions, + lastFullBlockHeight, + suite.lockManager, + ) + ingestEng, err := ingestion.New( suite.log, suite.net, suite.state, suite.me, - suite.request, all.Blocks, - all.Headers, - collections, - transactions, en.Results, en.Receipts, - collectionExecutedMetric, processedHeightInitializer, - lastFullBlockHeight, + collectionSyncer, + collectionExecutedMetric, nil, ) require.NoError(suite.T(), err) @@ -928,8 +1002,14 @@ func (suite *Suite) TestGetTransactionResult() { } ingestEng.OnFinalizedBlock(mb) - // Indexer HandleCollection receives the requested collection and all the execution receipts - err = indexer.HandleCollection(collection, collections, transactions, suite.log, collectionExecutedMetric) + // Indexer IndexCollection receives the requested collection and all the execution receipts + // Create a lock context for indexing + indexLctx := suite.lockManager.NewContext() + lockErr := indexLctx.AcquireLock(storage.LockInsertCollection) + require.NoError(suite.T(), lockErr) + defer indexLctx.Release() + + err = indexer.IndexCollection(indexLctx, collection, collections, suite.log, module.CollectionExecutedMetric(collectionExecutedMetric)) require.NoError(suite.T(), err) for _, r := range executionReceipts { @@ -937,8 +1017,12 @@ func (suite *Suite) TestGetTransactionResult() { require.NoError(suite.T(), err) } } - err = db.Update(operation.IndexBlockHeight(block.Header.Height, block.ID())) - require.NoError(suite.T(), err) + fctx2 := suite.lockManager.NewContext() + require.NoError(suite.T(), fctx2.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(suite.T(), bdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx2, rw, block.Header.Height, block.ID()) + })) + fctx2.Release() finalSnapshot.On("Head").Return(block.Header, nil) processExecutionReceipts(block, collection, enNodeIDs, originID, ingestEng) @@ -1060,9 +1144,10 @@ func (suite *Suite) 
TestGetTransactionResult() { // TestExecuteScript tests the three execute Script related calls to make sure that the execution api is called with // the correct block id func (suite *Suite) TestExecuteScript() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { - all := bstorage.InitAll(metrics.NewNoopCollector(), db) - en := util.ExecutionStorageLayer(suite.T(), db) + unittest.RunWithBadgerDB(suite.T(), func(badgerdb *badger.DB) { + db := badgerimpl.ToDB(badgerdb) + all := bstorage.InitAll(metrics.NewNoopCollector(), badgerdb) + en := util.ExecutionStorageLayer(suite.T(), badgerdb) identities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) suite.sealedSnapshot.On("Identities", mock.Anything).Return(identities, nil) suite.finalSnapshot.On("Identities", mock.Anything).Return(identities, nil) @@ -1092,12 +1177,13 @@ func (suite *Suite) TestExecuteScript() { ChainID: suite.chainID, AccessMetrics: suite.metrics, ConnFactory: connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, Log: suite.log, SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly, - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, }) require.NoError(suite.T(), err) @@ -1131,31 +1217,38 @@ func (suite *Suite) TestExecuteScript() { suite.net.On("Register", channels.ReceiveReceipts, mock.Anything).Return(conduit, nil). 
Once() - processedHeightInitializer := store.NewConsumerProgress(badgerimpl.ToDB(db), module.ConsumeProgressIngestionEngineBlockHeight) + processedHeightInitializer := store.NewConsumerProgress(db, module.ConsumeProgressIngestionEngineBlockHeight) - lastFullBlockHeightInitializer := store.NewConsumerProgress(badgerimpl.ToDB(db), module.ConsumeProgressLastFullBlockHeight) + lastFullBlockHeightInitializer := store.NewConsumerProgress(db, module.ConsumeProgressLastFullBlockHeight) lastFullBlockHeightProgress, err := lastFullBlockHeightInitializer.Initialize(suite.rootBlock.Height) require.NoError(suite.T(), err) lastFullBlockHeight, err := counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeightProgress) require.NoError(suite.T(), err) - // create the ingest engine + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, + all.Blocks, + all.Collections, + all.Transactions, + lastFullBlockHeight, + suite.lockManager, + ) + ingestEng, err := ingestion.New( suite.log, suite.net, suite.state, suite.me, - suite.request, all.Blocks, - all.Headers, - all.Collections, - all.Transactions, en.Results, en.Receipts, - collectionExecutedMetric, processedHeightInitializer, - lastFullBlockHeight, + collectionSyncer, + collectionExecutedMetric, nil, ) require.NoError(suite.T(), err) @@ -1165,9 +1258,20 @@ func (suite *Suite) TestExecuteScript() { // create a block and a seal pointing to that block lastBlock := unittest.BlockWithParentFixture(prevBlock.Header) - err = all.Blocks.Store(lastBlock) + lctx := suite.lockManager.NewContext() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, lastBlock) + })) + lctx.Release() require.NoError(suite.T(), err) - err = db.Update(operation.IndexBlockHeight(lastBlock.Header.Height, lastBlock.ID())) + + fctx := suite.lockManager.NewContext() + require.NoError(suite.T(), fctx.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(suite.T(), db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, lastBlock.Header.Height, lastBlock.ID()) + })) + fctx.Release() require.NoError(suite.T(), err) // update latest sealed block suite.sealedBlock = lastBlock.Header @@ -1179,10 +1283,18 @@ func (suite *Suite) TestExecuteScript() { require.NoError(suite.T(), err) } - err = all.Blocks.Store(prevBlock) - require.NoError(suite.T(), err) - err = db.Update(operation.IndexBlockHeight(prevBlock.Header.Height, prevBlock.ID())) - require.NoError(suite.T(), err) + fctx2 := suite.lockManager.NewContext() + require.NoError(suite.T(), fctx2.AcquireLock(storage.LockInsertBlock)) + require.NoError(suite.T(), fctx2.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(suite.T(), db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := all.Blocks.BatchStore(fctx2, rw, prevBlock) + if err != nil { + return err + } + + return operation.IndexFinalizedBlockByHeight(fctx2, rw, prevBlock.Header.Height, prevBlock.ID()) + })) + fctx2.Release() // create execution receipts for each of the execution node and the previous block executionReceipts = unittest.ReceiptsForBlockFixture(prevBlock, identities.NodeIDs()) @@ -1298,12 +1410,20 @@ func (suite *Suite) TestAPICallNodeVersionInfo() { // field in the response matches the finalized header from cache. 
It also tests that the LastFinalizedBlock field is // updated correctly when a block with a greater height is finalized. func (suite *Suite) TestLastFinalizedBlockHeightResult() { - suite.RunTest(func(handler *rpc.Handler, db *badger.DB, all *storage.All, en *storage.Execution) { + suite.RunTest(func(handler *rpc.Handler, badgerdb *badger.DB, all *storage.All, en *storage.Execution) { block := unittest.BlockWithParentFixture(suite.finalizedBlock) newFinalizedBlock := unittest.BlockWithParentFixture(block.Header) + db := badgerimpl.ToDB(badgerdb) + lctx := suite.lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(suite.T(), err) + defer lctx.Release() + // store new block - require.NoError(suite.T(), all.Blocks.Store(block)) + require.NoError(suite.T(), db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, block) + })) assertFinalizedBlockHeader := func(resp *accessproto.BlockHeaderResponse, err error) { require.NoError(suite.T(), err) diff --git a/engine/access/handle_irrecoverable_state_test.go b/engine/access/handle_irrecoverable_state_test.go index 456c5cd97fd..4e81b2592d7 100644 --- a/engine/access/handle_irrecoverable_state_test.go +++ b/engine/access/handle_irrecoverable_state_test.go @@ -26,6 +26,8 @@ import ( "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/grpcserver" @@ -153,8 +155,11 @@ func (suite *IrrecoverableStateTestSuite) SetupTest() { MaxHeightRange: 0, Log: suite.log, SnapshotHistoryLimit: 0, - Communicator: backend.NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), BlockTracker: nil, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, }) suite.Require().NoError(err) @@ -232,6 +237,7 @@ func (suite *IrrecoverableStateTestSuite) TestRestInconsistentNodeState() { unittest.CollectionGuaranteesWithCollectionIDFixture(collections), ) suite.blocks.On("ByID", blockHeader.ID()).Return(blockHeader, nil) + suite.headers.On("BlockIDByHeight", blockHeader.Header.Height).Return(blockHeader.ID(), nil) err := fmt.Errorf("inconsistent node's state") suite.snapshot.On("Head").Return(nil, err) diff --git a/engine/access/ingestion/collection_syncer.go b/engine/access/ingestion/collection_syncer.go new file mode 100644 index 00000000000..f4fcc4d2dd2 --- /dev/null +++ b/engine/access/ingestion/collection_syncer.go @@ -0,0 +1,411 @@ +package ingestion + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +var ( + defaultMissingCollsForBlockThreshold = 
missingCollsForBlockThreshold + defaultMissingCollsForAgeThreshold uint64 = missingCollsForAgeThreshold +) + +// The CollectionSyncer type provides mechanisms for syncing and indexing data +// from the Flow blockchain into local storage. Specifically, it handles +// the retrieval and processing of collections and transactions that may +// have been missed due to network delays, restarts, or gaps in finalization. +// +// It is responsible for ensuring the local node has +// all collections associated with finalized blocks starting from the +// last fully synced height. It works by periodically scanning the finalized +// block range, identifying missing collections, and triggering requests +// to fetch them from the network. Once collections are retrieved, it +// ensures they are persisted in the local collection and transaction stores. +// +// The syncer maintains a persistent, strictly monotonic counter +// (`lastFullBlockHeight`) to track the highest finalized block for which +// all collections have been fully indexed. It uses this information to +// avoid redundant processing and to measure catch-up progress. +// +// It is meant to operate in a background goroutine as part of the +// node's ingestion pipeline. +type CollectionSyncer struct { + logger zerolog.Logger + collectionExecutedMetric module.CollectionExecutedMetric + + state protocol.State + requester module.Requester + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager storage.LockManager +} + +// NewCollectionSyncer creates a new CollectionSyncer responsible for requesting, +// tracking, and indexing missing collections. +func NewCollectionSyncer( + logger zerolog.Logger, + collectionExecutedMetric module.CollectionExecutedMetric, + requester module.Requester, + state protocol.State, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter, + lockManager storage.LockManager, +) *CollectionSyncer { + collectionExecutedMetric.UpdateLastFullBlockHeight(lastFullBlockHeight.Value()) + + return &CollectionSyncer{ + logger: logger, + state: state, + requester: requester, + blocks: blocks, + collections: collections, + transactions: transactions, + lastFullBlockHeight: lastFullBlockHeight, + collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, + } +} + +// RequestCollections continuously monitors and triggers collection sync operations. +// It handles on startup collection catchup, periodic missing collection requests, and full block height updates. +func (s *CollectionSyncer) RequestCollections(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + requestCtx, cancel := context.WithTimeout(ctx, collectionCatchupTimeout) + defer cancel() + + // on start-up, AN wants to download all missing collections to serve it to end users + err := s.requestMissingCollectionsBlocking(requestCtx) + if err != nil { + s.logger.Error().Err(err).Msg("error downloading missing collections") + } + ready() + + requestCollectionsTicker := time.NewTicker(missingCollsRequestInterval) + defer requestCollectionsTicker.Stop() + + // Collections are requested concurrently in this design. + // To maintain accurate progress tracking and avoid redundant requests, + // we periodically update the `lastFullBlockHeight` to reflect the latest + // finalized block with all collections successfully indexed. 
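+	// For example (illustrative only, derived from the contiguous-height logic below): with
+	// lastFullBlockHeight = 100 and a finalized height of 105, if all collections for heights
+	// 101-103 are already in local storage but height 104 is still missing one, the periodic
+	// update advances the counter to 103 and holds it there until the collection for height 104
+	// arrives, even if height 105 is already complete.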
+ updateLastFullBlockHeightTicker := time.NewTicker(fullBlockRefreshInterval) + defer updateLastFullBlockHeightTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case <-requestCollectionsTicker.C: + err := s.requestMissingCollections() + if err != nil { + ctx.Throw(err) + } + + case <-updateLastFullBlockHeightTicker.C: + err := s.updateLastFullBlockHeight() + if err != nil { + ctx.Throw(err) + } + } + } +} + +// requestMissingCollections checks if missing collections should be requested based on configured +// block or age thresholds and triggers requests if needed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollections() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + collections, incompleteBlocksCount, err := s.findMissingCollections(lastFullBlockHeight) + if err != nil { + return err + } + + blocksThresholdReached := incompleteBlocksCount >= defaultMissingCollsForBlockThreshold + ageThresholdReached := lastFinalizedBlock.Height-lastFullBlockHeight > defaultMissingCollsForAgeThreshold + shouldRequest := blocksThresholdReached || ageThresholdReached + + if shouldRequest { + // warn log since generally this should not happen + s.logger.Warn(). + Uint64("finalized_height", lastFinalizedBlock.Height). + Uint64("last_full_blk_height", lastFullBlockHeight). + Int("missing_collection_blk_count", incompleteBlocksCount). + Int("missing_collection_count", len(collections)). + Msg("re-requesting missing collections") + + s.requestCollections(collections, false) + } + + return nil +} + +// requestMissingCollectionsBlocking requests and waits for all missing collections to be downloaded, +// blocking until either completion or context timeout. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollectionsBlocking(ctx context.Context) error { + missingCollections, _, err := s.findMissingCollections(s.lastFullBlockHeight.Value()) + if err != nil { + return err + } + if len(missingCollections) == 0 { + s.logger.Info().Msg("skipping requesting missing collections. no missing collections found") + return nil + } + + s.requestCollections(missingCollections, true) + + collectionsToBeDownloaded := make(map[flow.Identifier]struct{}) + for _, collection := range missingCollections { + collectionsToBeDownloaded[collection.CollectionID] = struct{}{} + } + + collectionStoragePollTicker := time.NewTicker(collectionCatchupDBPollInterval) + defer collectionStoragePollTicker.Stop() + + // we want to wait for all collections to be downloaded so we poll local storage periodically to make sure each + // collection was successfully saved in the storage. + for len(collectionsToBeDownloaded) > 0 { + select { + case <-ctx.Done(): + return fmt.Errorf("failed to complete collection retrieval: %w", ctx.Err()) + + case <-collectionStoragePollTicker.C: + s.logger.Info(). + Int("total_missing_collections", len(collectionsToBeDownloaded)). 
+ Msg("retrieving missing collections...") + + for collectionID := range collectionsToBeDownloaded { + downloaded, err := s.isCollectionInStorage(collectionID) + if err != nil { + return err + } + + if downloaded { + delete(collectionsToBeDownloaded, collectionID) + } + } + } + } + + s.logger.Info().Msg("collection catchup done") + return nil +} + +// findMissingCollections scans block heights from last known full block up to the latest finalized +// block and returns all missing collection along with the count of incomplete blocks. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollections(lastFullBlockHeight uint64) ([]*flow.CollectionGuarantee, int, error) { + // first block to look up collections at + firstBlockHeight := lastFullBlockHeight + 1 + + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return nil, 0, fmt.Errorf("failed to get finalized block: %w", err) + } + // last block to look up collections at + lastBlockHeight := lastFinalizedBlock.Height + + var missingCollections []*flow.CollectionGuarantee + var incompleteBlocksCount int + + for currBlockHeight := firstBlockHeight; currBlockHeight <= lastBlockHeight; currBlockHeight++ { + collections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return nil, 0, err + } + + if len(collections) == 0 { + continue + } + + missingCollections = append(missingCollections, collections...) + incompleteBlocksCount += 1 + } + + return missingCollections, incompleteBlocksCount, nil +} + +// findMissingCollectionsAtHeight returns all missing collections for a specific block height. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollectionsAtHeight(height uint64) ([]*flow.CollectionGuarantee, error) { + block, err := s.blocks.ByHeight(height) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block by height %d: %w", height, err) + } + + var missingCollections []*flow.CollectionGuarantee + for _, guarantee := range block.Payload.Guarantees { + inStorage, err := s.isCollectionInStorage(guarantee.CollectionID) + if err != nil { + return nil, err + } + + if !inStorage { + missingCollections = append(missingCollections, guarantee) + } + } + + return missingCollections, nil +} + +// isCollectionInStorage checks whether the given collection is present in local storage. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) isCollectionInStorage(collectionID flow.Identifier) (bool, error) { + _, err := s.collections.LightByID(collectionID) + if err == nil { + return true, nil + } + + if errors.Is(err, storage.ErrNotFound) { + return false, nil + } + + return false, fmt.Errorf("failed to retrieve collection %s: %w", collectionID.String(), err) +} + +// RequestCollectionsForBlock conditionally requests missing collections for a specific block height, +// skipping requests if the block is already below the known full block height. +func (s *CollectionSyncer) RequestCollectionsForBlock(height uint64, missingCollections []*flow.CollectionGuarantee) { + // skip requesting collections, if this block is below the last full block height. + // this means that either we have already received these collections, or the block + // may contain unverifiable guarantees (in case this node has just joined the network) + if height <= s.lastFullBlockHeight.Value() { + s.logger.Debug(). 
+ Msg("skipping requesting collections for finalized block as its collections have been already retrieved") + return + } + + s.requestCollections(missingCollections, false) +} + +// requestCollections registers collection download requests in the requester engine, +// optionally forcing immediate dispatch. +func (s *CollectionSyncer) requestCollections(collections []*flow.CollectionGuarantee, immediately bool) { + for _, collection := range collections { + guarantors, err := protocol.FindGuarantors(s.state, collection) + if err != nil { + // failed to find guarantors for guarantees contained in a finalized block is fatal error + s.logger.Fatal().Err(err).Msgf("could not find guarantors for guarantee %v", collection.ID()) + } + s.requester.EntityByID(collection.ID(), filter.HasNodeID[flow.Identity](guarantors...)) + } + + if immediately { + s.requester.Force() + } +} + +// updateLastFullBlockHeight updates the next highest block height where all previous collections have been indexed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) updateLastFullBlockHeight() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + // track the latest contiguous full height + newLastFullBlockHeight, err := s.findLowestBlockHeightWithMissingCollections(lastFullBlockHeight, lastFinalizedBlock.Height) + if err != nil { + return fmt.Errorf("failed to find last full block height: %w", err) + } + + // if more contiguous blocks are now complete, update db + if newLastFullBlockHeight > lastFullBlockHeight { + err := s.lastFullBlockHeight.Set(newLastFullBlockHeight) + if err != nil { + return fmt.Errorf("failed to update last full block height: %w", err) + } + + s.collectionExecutedMetric.UpdateLastFullBlockHeight(newLastFullBlockHeight) + + s.logger.Debug(). + Uint64("last_full_block_height", newLastFullBlockHeight). + Msg("updated last full block height counter") + } + + return nil +} + +// findLowestBlockHeightWithMissingCollections finds the next block height with missing collections, +// returning the latest contiguous height where all collections are present. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findLowestBlockHeightWithMissingCollections( + lastKnownFullBlockHeight uint64, + finalizedBlockHeight uint64, +) (uint64, error) { + newLastFullBlockHeight := lastKnownFullBlockHeight + + for currBlockHeight := lastKnownFullBlockHeight + 1; currBlockHeight <= finalizedBlockHeight; currBlockHeight++ { + missingCollections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return 0, err + } + + // return when we find the first block with missing collections + if len(missingCollections) > 0 { + return newLastFullBlockHeight, nil + } + + newLastFullBlockHeight = currBlockHeight + } + + return newLastFullBlockHeight, nil +} + +// OnCollectionDownloaded indexes and persists a downloaded collection. +// This is a callback intended to be used with the requester engine. 
+func (s *CollectionSyncer) OnCollectionDownloaded(_ flow.Identifier, entity flow.Entity) { + collection, ok := entity.(*flow.Collection) + if !ok { + s.logger.Error().Msgf("invalid entity type (%T)", entity) + return + } + + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + // TODO(leo): should be using irrecoverable.Context + s.logger.Fatal().Err(err).Msg("could not acquire lock for collection indexing") + return + } + + err = indexer.IndexCollection(lctx, collection, s.collections, s.logger, s.collectionExecutedMetric) + if err != nil { + s.logger.Error().Err(err).Msg("could not index collection after it has been downloaded") + return + } +} diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index ed7c01fdbbb..bfcaa172b6d 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -2,7 +2,6 @@ package ingestion import ( "context" - "errors" "fmt" "time" @@ -13,13 +12,10 @@ import ( "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" - "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" @@ -42,7 +38,7 @@ const ( // a threshold of number of blocks with missing collections beyond which collections should be re-requested // this is to prevent spamming the collection nodes with request - missingCollsForBlkThreshold = 100 + missingCollsForBlockThreshold = 100 // a threshold of block height beyond which collections should be re-requested (regardless of the number of blocks for which collection are missing) // this is to ensure that if a collection is missing for a long time (in terms of block height) it is eventually re-requested @@ -59,15 +55,6 @@ const ( searchAhead = 1 ) -var ( - defaultCollectionCatchupTimeout = collectionCatchupTimeout - defaultCollectionCatchupDBPollInterval = collectionCatchupDBPollInterval - defaultFullBlockRefreshInterval = fullBlockRefreshInterval - defaultMissingCollsRequestInterval = missingCollsRequestInterval - defaultMissingCollsForBlkThreshold = missingCollsForBlkThreshold - defaultMissingCollsForAgeThreshold uint64 = missingCollsForAgeThreshold -) - // Engine represents the ingestion engine, used to funnel data from other nodes // to a centralized location that can be queried by a user // @@ -85,23 +72,20 @@ type Engine struct { // txResultErrorMessagesChan is used to fetch and store transaction result error messages for blocks txResultErrorMessagesChan chan flow.Identifier - log zerolog.Logger // used to log relevant actions with context - state protocol.State // used to access the protocol state - me module.Local // used to access local node information - request module.Requester // used to request collections + log zerolog.Logger // used to log relevant actions with context + state protocol.State // used to access the protocol state + me module.Local // used to access local node information // storage // FIX: remove direct DB access by substituting indexer module blocks 
storage.Blocks - headers storage.Headers - collections storage.Collections - transactions storage.Transactions executionReceipts storage.ExecutionReceipts maxReceiptHeight uint64 executionResults storage.ExecutionResults - lastFullBlockHeight *counters.PersistentStrictMonotonicCounter - // metrics + collectionSyncer *CollectionSyncer + // TODO: There's still a need for this metric to be in the ingestion engine rather than collection syncer. + // Maybe it is a good idea to split it up? collectionExecutedMetric module.CollectionExecutedMetric txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore @@ -117,16 +101,12 @@ func New( net network.EngineRegistry, state protocol.State, me module.Local, - request module.Requester, blocks storage.Blocks, - headers storage.Headers, - collections storage.Collections, - transactions storage.Transactions, executionResults storage.ExecutionResults, executionReceipts storage.ExecutionReceipts, - collectionExecutedMetric module.CollectionExecutedMetric, finalizedProcessedHeight storage.ConsumerProgressInitializer, - lastFullBlockHeight *counters.PersistentStrictMonotonicCounter, + collectionSyncer *CollectionSyncer, + collectionExecutedMetric module.CollectionExecutedMetric, txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore, ) (*Engine, error) { executionReceiptsRawQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) @@ -148,24 +128,17 @@ func New( }, ) - collectionExecutedMetric.UpdateLastFullBlockHeight(lastFullBlockHeight.Value()) - // initialize the propagation engine with its dependencies e := &Engine{ log: log.With().Str("engine", "ingestion").Logger(), state: state, me: me, - request: request, blocks: blocks, - headers: headers, - collections: collections, - transactions: transactions, executionResults: executionResults, executionReceipts: executionReceipts, maxReceiptHeight: 0, collectionExecutedMetric: collectionExecutedMetric, finalizedBlockNotifier: engine.NewNotifier(), - lastFullBlockHeight: lastFullBlockHeight, // queue / notifier for execution receipts executionReceiptsNotifier: engine.NewNotifier(), @@ -173,6 +146,7 @@ func New( executionReceiptsQueue: executionReceiptsQueue, messageHandler: messageHandler, txErrorMessagesCore: txErrorMessagesCore, + collectionSyncer: collectionSyncer, } // jobqueue Jobs object that tracks finalized blocks by height. This is used by the finalizedBlockConsumer @@ -202,10 +176,12 @@ func New( // Add workers builder := component.NewComponentManagerBuilder(). - AddWorker(e.processBackground). + AddWorker(e.collectionSyncer.RequestCollections). AddWorker(e.processExecutionReceipts). AddWorker(e.runFinalizedBlockConsumer) + //TODO: should I add a check for nil ptr for collection syncer ? (as done below) + // If txErrorMessagesCore is provided, add a worker responsible for processing // transaction result error messages by receipts. This worker listens for blocks // containing execution receipts and processes any associated transaction result @@ -268,51 +244,6 @@ func (e *Engine) processFinalizedBlockJob(ctx irrecoverable.SignalerContext, job e.log.Error().Err(err).Str("job_id", string(job.ID())).Msg("error during finalized block processing job") } -// processBackground is a background routine responsible for executing periodic tasks related to block processing and collection retrieval. -// It performs tasks such as updating indexes of processed blocks and requesting missing collections from the network. -// This function runs indefinitely until the context is canceled. 
-// Periodically, it checks for updates in the last fully processed block index and requests missing collections if necessary. -// Additionally, it checks for missing collections across a range of blocks and requests them if certain thresholds are met. -func (e *Engine) processBackground(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // context with timeout - requestCtx, cancel := context.WithTimeout(ctx, defaultCollectionCatchupTimeout) - defer cancel() - - // request missing collections - err := e.requestMissingCollections(requestCtx) - if err != nil { - e.log.Error().Err(err).Msg("requesting missing collections failed") - } - ready() - - updateTicker := time.NewTicker(defaultFullBlockRefreshInterval) - defer updateTicker.Stop() - - requestTicker := time.NewTicker(defaultMissingCollsRequestInterval) - defer requestTicker.Stop() - - for { - select { - case <-ctx.Done(): - return - - // refresh the LastFullBlockReceived index - case <-updateTicker.C: - err := e.updateLastFullBlockReceivedIndex() - if err != nil { - ctx.Throw(err) - } - - // request missing collections from the network - case <-requestTicker.C: - err := e.checkMissingCollections() - if err != nil { - ctx.Throw(err) - } - } - } -} - // processExecutionReceipts is responsible for processing the execution receipts. // It listens for incoming execution receipts and processes them asynchronously. func (e *Engine) processExecutionReceipts(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { @@ -381,7 +312,7 @@ func (e *Engine) processTransactionResultErrorMessagesByReceipts(ctx irrecoverab case <-ctx.Done(): return case blockID := <-e.txResultErrorMessagesChan: - err := e.txErrorMessagesCore.HandleTransactionResultErrorMessages(ctx, blockID) + err := e.txErrorMessagesCore.FetchErrorMessages(ctx, blockID) if err != nil { // TODO: we should revisit error handling here. // Errors that come from querying the EN and possibly ExecutionNodesForBlockID should be logged and @@ -454,18 +385,7 @@ func (e *Engine) processFinalizedBlock(block *flow.Block) error { } } - // skip requesting collections, if this block is below the last full block height - // this means that either we have already received these collections, or the block - // may contain unverifiable guarantees (in case this node has just joined the network) - lastFullBlockHeight := e.lastFullBlockHeight.Value() - if block.Header.Height <= lastFullBlockHeight { - e.log.Info().Msgf("skipping requesting collections for finalized block below last full block height (%d<=%d)", block.Header.Height, lastFullBlockHeight) - return nil - } - - // queue requesting each of the collections from the collection node - e.requestCollectionsInFinalizedBlock(block.Payload.Guarantees) - + e.collectionSyncer.RequestCollectionsForBlock(block.Header.Height, block.Payload.Guarantees) e.collectionExecutedMetric.BlockFinalized(block) return nil @@ -485,266 +405,3 @@ func (e *Engine) handleExecutionReceipt(_ flow.Identifier, r *flow.ExecutionRece e.collectionExecutedMetric.ExecutionReceiptReceived(r) return nil } - -// OnCollection handles the response of the collection request made earlier when a block was received. -// No errors expected during normal operations. 
-func (e *Engine) OnCollection(originID flow.Identifier, entity flow.Entity) { - collection, ok := entity.(*flow.Collection) - if !ok { - e.log.Error().Msgf("invalid entity type (%T)", entity) - return - } - - err := indexer.HandleCollection(collection, e.collections, e.transactions, e.log, e.collectionExecutedMetric) - if err != nil { - e.log.Error().Err(err).Msg("could not handle collection") - return - } -} - -// requestMissingCollections requests missing collections for all blocks in the local db storage once at startup -func (e *Engine) requestMissingCollections(ctx context.Context) error { - var startHeight, endHeight uint64 - - // get the height of the last block for which all collections were received - lastFullHeight := e.lastFullBlockHeight.Value() - // start from the next block - startHeight = lastFullHeight + 1 - - // end at the finalized block - finalBlk, err := e.state.Final().Head() - if err != nil { - return err - } - endHeight = finalBlk.Height - - e.log.Info(). - Uint64("start_height", startHeight). - Uint64("end_height", endHeight). - Msg("starting collection catchup") - - // collect all missing collection ids in a map - var missingCollMap = make(map[flow.Identifier]struct{}) - - // iterate through the complete chain and request the missing collections - for i := startHeight; i <= endHeight; i++ { - - // if deadline exceeded or someone cancelled the context - if ctx.Err() != nil { - return fmt.Errorf("failed to complete requests for missing collections: %w", ctx.Err()) - } - - missingColls, err := e.missingCollectionsAtHeight(i) - if err != nil { - return fmt.Errorf("failed to retrieve missing collections by height %d during collection catchup: %w", i, err) - } - - // request the missing collections - e.requestCollectionsInFinalizedBlock(missingColls) - - // add them to the missing collection id map to track later - for _, cg := range missingColls { - missingCollMap[cg.CollectionID] = struct{}{} - } - } - - // if no collections were found to be missing we are done. - if len(missingCollMap) == 0 { - // nothing more to do - e.log.Info().Msg("no missing collections found") - return nil - } - - // the collection catchup needs to happen ASAP when the node starts up. Hence, force the requester to dispatch all request - e.request.Force() - - // track progress of retrieving all the missing collections by polling the db periodically - ticker := time.NewTicker(defaultCollectionCatchupDBPollInterval) - defer ticker.Stop() - - // while there are still missing collections, keep polling - for len(missingCollMap) > 0 { - select { - case <-ctx.Done(): - // context may have expired - return fmt.Errorf("failed to complete collection retreival: %w", ctx.Err()) - case <-ticker.C: - - // log progress - e.log.Info(). - Int("total_missing_collections", len(missingCollMap)). 
- Msg("retrieving missing collections...") - - var foundColls []flow.Identifier - // query db to find if collections are still missing - for collID := range missingCollMap { - found, err := e.haveCollection(collID) - if err != nil { - return err - } - // if collection found in local db, remove it from missingColls later - if found { - foundColls = append(foundColls, collID) - } - } - - // update the missingColls list by removing collections that have now been received - for _, c := range foundColls { - delete(missingCollMap, c) - } - } - } - - e.log.Info().Msg("collection catchup done") - return nil -} - -// updateLastFullBlockReceivedIndex finds the next highest height where all previous collections -// have been indexed, and updates the LastFullBlockReceived index to that height -func (e *Engine) updateLastFullBlockReceivedIndex() error { - lastFullHeight := e.lastFullBlockHeight.Value() - - finalBlk, err := e.state.Final().Head() - if err != nil { - return fmt.Errorf("failed to get finalized block: %w", err) - } - finalizedHeight := finalBlk.Height - - // track the latest contiguous full height - newLastFullHeight, err := e.lowestHeightWithMissingCollection(lastFullHeight, finalizedHeight) - if err != nil { - return fmt.Errorf("failed to find last full block received height: %w", err) - } - - // if more contiguous blocks are now complete, update db - if newLastFullHeight > lastFullHeight { - err := e.lastFullBlockHeight.Set(newLastFullHeight) - if err != nil { - return fmt.Errorf("failed to update last full block height: %w", err) - } - - e.collectionExecutedMetric.UpdateLastFullBlockHeight(newLastFullHeight) - - e.log.Debug(). - Uint64("last_full_block_height", newLastFullHeight). - Msg("updated LastFullBlockReceived index") - } - - return nil -} - -// lowestHeightWithMissingCollection returns the lowest height that is missing collections -func (e *Engine) lowestHeightWithMissingCollection(lastFullHeight, finalizedHeight uint64) (uint64, error) { - newLastFullHeight := lastFullHeight - - for i := lastFullHeight + 1; i <= finalizedHeight; i++ { - missingColls, err := e.missingCollectionsAtHeight(i) - if err != nil { - return 0, err - } - - // return when we find the first block with missing collections - if len(missingColls) > 0 { - return newLastFullHeight, nil - } - - newLastFullHeight = i - } - - return newLastFullHeight, nil -} - -// checkMissingCollections requests missing collections if the number of blocks missing collections -// have reached the defaultMissingCollsForBlkThreshold value. -func (e *Engine) checkMissingCollections() error { - lastFullHeight := e.lastFullBlockHeight.Value() - - finalBlk, err := e.state.Final().Head() - if err != nil { - return fmt.Errorf("failed to get finalized block: %w", err) - } - finalizedHeight := finalBlk.Height - - // number of blocks with missing collections - incompleteBlksCnt := 0 - - // collect all missing collections - var allMissingColls []*flow.CollectionGuarantee - - // start from the next block till we either hit the finalized block or cross the max collection missing threshold - for i := lastFullHeight + 1; i <= finalizedHeight && incompleteBlksCnt < defaultMissingCollsForBlkThreshold; i++ { - missingColls, err := e.missingCollectionsAtHeight(i) - if err != nil { - return fmt.Errorf("failed to find missing collections at height %d: %w", i, err) - } - - if len(missingColls) == 0 { - continue - } - - incompleteBlksCnt++ - - allMissingColls = append(allMissingColls, missingColls...) 
- } - - // additionally, if more than threshold blocks have missing collections OR collections are - // missing since defaultMissingCollsForAgeThreshold, re-request those collections - if incompleteBlksCnt >= defaultMissingCollsForBlkThreshold || - (finalizedHeight-lastFullHeight) > defaultMissingCollsForAgeThreshold { - // warn log since this should generally not happen - e.log.Warn(). - Uint64("finalized_height", finalizedHeight). - Uint64("last_full_blk_height", lastFullHeight). - Int("missing_collection_blk_count", incompleteBlksCnt). - Int("missing_collection_count", len(allMissingColls)). - Msg("re-requesting missing collections") - e.requestCollectionsInFinalizedBlock(allMissingColls) - } - - return nil -} - -// missingCollectionsAtHeight returns all missing collection guarantees at a given height -func (e *Engine) missingCollectionsAtHeight(h uint64) ([]*flow.CollectionGuarantee, error) { - block, err := e.blocks.ByHeight(h) - if err != nil { - return nil, fmt.Errorf("failed to retrieve block by height %d: %w", h, err) - } - - var missingColls []*flow.CollectionGuarantee - for _, guarantee := range block.Payload.Guarantees { - collID := guarantee.CollectionID - found, err := e.haveCollection(collID) - if err != nil { - return nil, err - } - if !found { - missingColls = append(missingColls, guarantee) - } - } - return missingColls, nil -} - -// haveCollection looks up the collection from the collection db with collID -func (e *Engine) haveCollection(collID flow.Identifier) (bool, error) { - _, err := e.collections.LightByID(collID) - if err == nil { - return true, nil - } - if errors.Is(err, storage.ErrNotFound) { - return false, nil - } - return false, fmt.Errorf("failed to retrieve collection %s: %w", collID.String(), err) -} - -// requestCollectionsInFinalizedBlock registers collection requests with the requester engine -func (e *Engine) requestCollectionsInFinalizedBlock(missingColls []*flow.CollectionGuarantee) { - for _, cg := range missingColls { - guarantors, err := protocol.FindGuarantors(e.state, cg) - if err != nil { - // failed to find guarantors for guarantees contained in a finalized block is fatal error - e.log.Fatal().Err(err).Msgf("could not find guarantors for guarantee %v", cg.ID()) - } - e.request.EntityByID(cg.ID(), filter.HasNodeID[flow.Identity](guarantors...)) - } -} diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index d8daf7ce3e9..0396e01bb91 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -77,6 +78,7 @@ type Suite struct { db *badger.DB dbDir string lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager lockctx.Manager } func TestIngestEngine(t *testing.T) { @@ -94,6 +96,7 @@ func (s *Suite) SetupTest() { s.log = zerolog.New(os.Stderr) s.ctx, s.cancel = context.WithCancel(context.Background()) s.db, s.dbDir = unittest.TempBadgerDB(s.T()) + s.lockManager = storerr.NewTestingLockManager() s.obsIdentity = unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) @@ -185,8 +188,9 @@ func (s *Suite) SetupTest() { require.NoError(s.T(), err) } -// initIngestionEngine create new instance of ingestion engine and waits when it starts -func (s *Suite) initIngestionEngine(ctx irrecoverable.SignalerContext) *Engine { +// initEngineAndSyncer create new 
instance of ingestion engine and collection syncer. +// It waits until the ingestion engine starts. +func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, *CollectionSyncer) { processedHeightInitializer := store.NewConsumerProgress(badgerimpl.ToDB(s.db), module.ConsumeProgressIngestionEngineBlockHeight) lastFullBlockHeight, err := store.NewConsumerProgress(badgerimpl.ToDB(s.db), module.ConsumeProgressLastFullBlockHeight).Initialize(s.finalizedBlock.Height) @@ -195,21 +199,29 @@ func (s *Suite) initIngestionEngine(ctx irrecoverable.SignalerContext) *Engine { s.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeight) require.NoError(s.T(), err) + syncer := NewCollectionSyncer( + s.log, + s.collectionExecutedMetric, + s.request, + s.proto.state, + s.blocks, + s.collections, + s.transactions, + s.lastFullBlockHeight, + s.lockManager, + ) + eng, err := New( s.log, s.net, s.proto.state, s.me, - s.request, s.blocks, - s.headers, - s.collections, - s.transactions, s.results, s.receipts, - s.collectionExecutedMetric, processedHeightInitializer, - s.lastFullBlockHeight, + syncer, + s.collectionExecutedMetric, nil, ) @@ -218,7 +230,7 @@ func (s *Suite) initIngestionEngine(ctx irrecoverable.SignalerContext) *Engine { eng.ComponentManager.Start(ctx) <-eng.Ready() - return eng + return eng, syncer } // mockCollectionsForBlock mocks collections for block @@ -271,7 +283,7 @@ func (s *Suite) TestOnFinalizedBlockSingle() { cluster.On("Members").Return(clusterCommittee, nil) irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) block := s.generateBlock(clusterCommittee, snap) block.Header.Height = s.finalizedBlock.Height + 1 @@ -327,7 +339,7 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { cluster.On("Members").Return(clusterCommittee, nil) irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) newBlocksCount := 3 startHeight := s.finalizedBlock.Height + 1 @@ -390,46 +402,38 @@ func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { // TestOnCollection checks that when a Collection is received, it is persisted func (s *Suite) TestOnCollection() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - s.initIngestionEngine(irrecoverableCtx) + s.initEngineAndSyncer(irrecoverableCtx) collection := unittest.CollectionFixture(5) light := collection.Light() // we should store the light collection and index its transactions - s.collections.On("StoreLightAndIndexByTransaction", &light).Return(nil).Once() + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, nil).Once() - // for each transaction in the collection, we should store it - needed := make(map[flow.Identifier]struct{}) - for _, txID := range light.Transactions { - needed[txID] = struct{}{} - } - s.transactions.On("Store", mock.Anything).Return(nil).Run( - func(args mock.Arguments) { - tx := args.Get(0).(*flow.TransactionBody) - _, pending := needed[tx.ID()] - s.Assert().True(pending, "tx not pending (%x)", tx.ID()) - }, - ) + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + err := lctx.AcquireLock(storerr.LockInsertCollection) + require.NoError(s.T(), err) + defer lctx.Release() - err := indexer.HandleCollection(&collection, s.collections, s.transactions, s.log, 
s.collectionExecutedMetric) + err = indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) require.NoError(s.T(), err) - // check that the collection was stored and indexed, and we stored all transactions + // check that the collection was stored and indexed s.collections.AssertExpectations(s.T()) - s.transactions.AssertNumberOfCalls(s.T(), "Store", len(collection.Transactions)) } // TestExecutionReceiptsAreIndexed checks that execution receipts are properly indexed func (s *Suite) TestExecutionReceiptsAreIndexed() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) originID := unittest.IdentifierFixture() collection := unittest.CollectionFixture(5) light := collection.Light() // we should store the light collection and index its transactions - s.collections.On("StoreLightAndIndexByTransaction", &light).Return(nil).Once() + s.collections.On("StoreAndIndexByTransaction", &collection).Return(light, nil).Once() block := &flow.Block{ Header: &flow.Header{Height: 0}, Payload: &flow.Payload{Guarantees: []*flow.CollectionGuarantee{}}, @@ -472,39 +476,31 @@ func (s *Suite) TestExecutionReceiptsAreIndexed() { // crash but just ignores its transactions. func (s *Suite) TestOnCollectionDuplicate() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - s.initIngestionEngine(irrecoverableCtx) + s.initEngineAndSyncer(irrecoverableCtx) collection := unittest.CollectionFixture(5) light := collection.Light() // we should store the light collection and index its transactions - s.collections.On("StoreLightAndIndexByTransaction", &light).Return(storerr.ErrAlreadyExists).Once() + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, storerr.ErrAlreadyExists).Once() - // for each transaction in the collection, we should store it - needed := make(map[flow.Identifier]struct{}) - for _, txID := range light.Transactions { - needed[txID] = struct{}{} - } - s.transactions.On("Store", mock.Anything).Return(nil).Run( - func(args mock.Arguments) { - tx := args.Get(0).(*flow.TransactionBody) - _, pending := needed[tx.ID()] - s.Assert().True(pending, "tx not pending (%x)", tx.ID()) - }, - ) - - err := indexer.HandleCollection(&collection, s.collections, s.transactions, s.log, s.collectionExecutedMetric) + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + err := lctx.AcquireLock(storerr.LockInsertCollection) require.NoError(s.T(), err) + defer lctx.Release() + + err = indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + require.ErrorIs(s.T(), err, storerr.ErrAlreadyExists) - // check that the collection was stored and indexed, and we stored all transactions + // check that the collection was stored and indexed s.collections.AssertExpectations(s.T()) - s.transactions.AssertNotCalled(s.T(), "Store", "should not store any transactions") } // TestRequestMissingCollections tests that the all missing collections are requested on the call to requestMissingCollections func (s *Suite) TestRequestMissingCollections() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) blkCnt := 3 startHeight := uint64(1000) @@ -602,10 +598,10 @@ func (s *Suite) TestRequestMissingCollections() { p = 1 // timeout after 3 db polls - ctx, cancel := 
context.WithTimeout(context.Background(), 100*defaultCollectionCatchupDBPollInterval) + ctx, cancel := context.WithTimeout(context.Background(), 100*collectionCatchupDBPollInterval) defer cancel() - err := eng.requestMissingCollections(ctx) + err := syncer.requestMissingCollectionsBlocking(ctx) require.Error(s.T(), err) require.Contains(s.T(), err.Error(), "context deadline exceeded") @@ -618,10 +614,10 @@ func (s *Suite) TestRequestMissingCollections() { // 90% of the time, collections are reported as not received when the collection storage is queried p = 0.9 - ctx, cancel := context.WithTimeout(context.Background(), defaultCollectionCatchupTimeout) + ctx, cancel := context.WithTimeout(context.Background(), collectionCatchupTimeout) defer cancel() - err := eng.requestMissingCollections(ctx) + err := syncer.requestMissingCollectionsBlocking(ctx) require.NoError(s.T(), err) require.Len(s.T(), rcvdColl, len(collIDs)) @@ -630,12 +626,12 @@ func (s *Suite) TestRequestMissingCollections() { }) } -// TestProcessBackgroundCalls tests that updateLastFullBlockReceivedIndex and checkMissingCollections +// TestProcessBackgroundCalls tests that updateLastFullBlockHeight and checkMissingCollections // function calls keep the FullBlockIndex up-to-date and request collections if blocks with missing // collections exceed the threshold. func (s *Suite) TestProcessBackgroundCalls() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) blkCnt := 3 collPerBlk := 10 @@ -712,9 +708,9 @@ func (s *Suite) TestProcessBackgroundCalls() { err := s.lastFullBlockHeight.Set(rootBlk.Header.Height) s.Require().NoError(err) - s.Run("missing collections are requested when count exceeds defaultMissingCollsForBlkThreshold", func() { + s.Run("missing collections are requested when count exceeds defaultMissingCollsForBlockThreshold", func() { // lower the block threshold to request missing collections - defaultMissingCollsForBlkThreshold = 2 + defaultMissingCollsForBlockThreshold = 2 // mark all blocks beyond the root block as incomplete for i := 1; i < blkCnt; i++ { @@ -725,7 +721,7 @@ func (s *Suite) TestProcessBackgroundCalls() { } } - err := eng.checkMissingCollections() + err := syncer.requestMissingCollections() s.Require().NoError(err) // assert that missing collections are requested @@ -740,7 +736,7 @@ func (s *Suite) TestProcessBackgroundCalls() { defaultMissingCollsForAgeThreshold = 1 // raise the block threshold to ensure it does not trigger missing collection request - defaultMissingCollsForBlkThreshold = blkCnt + 1 + defaultMissingCollsForBlockThreshold = blkCnt + 1 // mark all blocks beyond the root block as incomplete for i := 1; i < blkCnt; i++ { @@ -751,7 +747,7 @@ func (s *Suite) TestProcessBackgroundCalls() { } } - err := eng.checkMissingCollections() + err := syncer.requestMissingCollections() s.Require().NoError(err) // assert that missing collections are requested @@ -761,17 +757,17 @@ func (s *Suite) TestProcessBackgroundCalls() { s.blocks.AssertExpectations(s.T()) // not new call to UpdateLastFullBlockHeight should be made }) - s.Run("missing collections are not requested if defaultMissingCollsForBlkThreshold not reached", func() { + s.Run("missing collections are not requested if defaultMissingCollsForBlockThreshold not reached", func() { // raise the thresholds to avoid requesting missing collections defaultMissingCollsForAgeThreshold = 3 - defaultMissingCollsForBlkThreshold = 3 + 
defaultMissingCollsForBlockThreshold = 3 // mark all blocks beyond the root block as incomplete for i := 1; i < blkCnt; i++ { blkMissingColl[i] = true } - err := eng.checkMissingCollections() + err := syncer.requestMissingCollections() s.Require().NoError(err) // assert that missing collections are not requested even though there are collections missing @@ -797,7 +793,7 @@ func (s *Suite) TestProcessBackgroundCalls() { err = s.lastFullBlockHeight.Set(blockBeforeFinalized.Height) s.Require().NoError(err) - err = eng.updateLastFullBlockReceivedIndex() + err = syncer.updateLastFullBlockHeight() s.Require().NoError(err) s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) s.Require().NoError(err) @@ -806,7 +802,7 @@ func (s *Suite) TestProcessBackgroundCalls() { }) s.Run("full block height index is not advanced beyond finalized blocks", func() { - err = eng.updateLastFullBlockReceivedIndex() + err = syncer.updateLastFullBlockHeight() s.Require().NoError(err) s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) @@ -816,7 +812,7 @@ func (s *Suite) TestProcessBackgroundCalls() { func (s *Suite) TestComponentShutdown() { irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) - eng := s.initIngestionEngine(irrecoverableCtx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) // start then shut down the engine unittest.AssertClosesBefore(s.T(), eng.Ready(), 10*time.Millisecond) diff --git a/engine/access/ingestion/tx_error_messages/mock/requester.go b/engine/access/ingestion/tx_error_messages/mock/requester.go new file mode 100644 index 00000000000..dd0e7438d0d --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/mock/requester.go @@ -0,0 +1,59 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Requester is an autogenerated mock type for the Requester type +type Requester struct { + mock.Mock +} + +// Request provides a mock function with given fields: ctx +func (_m *Requester) Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Request") + } + + var r0 []flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]flow.TransactionResultErrorMessage, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []flow.TransactionResultErrorMessage); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRequester creates a new instance of Requester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRequester(t interface { + mock.TestingT + Cleanup(func()) +}) *Requester { + mock := &Requester{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/ingestion/tx_error_messages/requester.go b/engine/access/ingestion/tx_error_messages/requester.go new file mode 100644 index 00000000000..a759cc9a6cb --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/requester.go @@ -0,0 +1,184 @@ +package tx_error_messages + +import ( + "context" + "errors" + "fmt" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/utils/logging" + + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" +) + +// Requester defines the interface for requesting transaction result error messages. +type Requester interface { + // Request fetches transaction results error messages. + // Expected errors: + // - context.Canceled: if the provided context was canceled before completion + // + // No other errors are expected during normal operations + Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) +} + +// RequesterConfig contains the retry settings for the tx error messages fetch. +type RequesterConfig struct { + // the initial delay used in the exponential backoff for failed tx error messages download + // retries. + RetryDelay time.Duration + // the max delay used in the exponential backoff for failed tx error messages download. + MaxRetryDelay time.Duration +} + +var _ Requester = (*RequesterImpl)(nil) + +type RequesterImpl struct { + logger zerolog.Logger + config *RequesterConfig + txErrorMessageProvider error_messages.Provider + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider + executionResult *flow.ExecutionResult +} + +func NewRequester( + logger zerolog.Logger, + config *RequesterConfig, + txErrorMessageProvider error_messages.Provider, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, + executionResult *flow.ExecutionResult, +) *RequesterImpl { + return &RequesterImpl{ + logger: logger, + config: config, + txErrorMessageProvider: txErrorMessageProvider, + execNodeIdentitiesProvider: execNodeIdentitiesProvider, + executionResult: executionResult, + } +} + +// Request fetches transaction error messages for the specific +// execution result this requester was configured with. +// +// Expected errors expected during normal operations: +// - context.DeadlineExceeded - if context timeouts +// - context.Canceled - if context was canceled +func (r *RequesterImpl) Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) { + backoff := retry.NewExponential(r.config.RetryDelay) + backoff = retry.WithCappedDuration(r.config.MaxRetryDelay, backoff) + backoff = retry.WithJitterPercent(15, backoff) + + blockID := r.executionResult.BlockID + resultID := r.executionResult.ID() + + var errorMessages []flow.TransactionResultErrorMessage + + attempt := 0 + err := retry.Do(ctx, backoff, func(context.Context) error { + if attempt > 0 { + r.logger.Debug(). + Str("block_id", blockID.String()). + Str("result_id", resultID.String()). + Uint64("attempt", uint64(attempt)). 
+ Msgf("retrying download") + } + attempt++ + + var err error + errorMessages, err = r.request(ctx, blockID, resultID) + if err == nil { + return nil + } + + // retry if there are no acceptable ENs to download messages from at this point + if errors.Is(err, rpc.ErrNoENsFoundForExecutionResult) { + return retry.RetryableError(err) + } + + // retry any grpc error except context canceled and deadline exceeded + if status, ok := status.FromError(err); ok { + if status.Code() == codes.DeadlineExceeded || status.Code() == codes.Canceled { + return errors.Join(err, ctx.Err()) + } + + return retry.RetryableError(err) + } + + return err + }) + + if err != nil { + return nil, err + } + return errorMessages, nil +} + +// request retrieves transaction error messages for a given block and result ID +// by querying the appropriate execution nodes. It returns a slice of error +// messages or an error if the retrieval fails. +// +// Expected errors during normal operations: +// 1. rpc.ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced +// the provided execution result and matched the operators criteria +// 2. status.Error - GRPC call failed, some of possible codes are: +// - codes.NotFound - request cannot be served by EN because of absence of data. +// - codes.Unavailable - remote node is not unavailable. +// - codes.Canceled - if ctx is canceled during request +func (r *RequesterImpl) request( + ctx context.Context, + blockID flow.Identifier, + resultID flow.Identifier, +) ([]flow.TransactionResultErrorMessage, error) { + execNodes, err := r.execNodeIdentitiesProvider.ExecutionNodesForResultID(blockID, resultID) + if err != nil { + r.logger.Error().Err(err). + Str("block_id", blockID.String()). + Str("result_id", resultID.String()). + Msg("failed to find execution nodes for specific result ID") + return nil, fmt.Errorf("could not find execution nodes for result %v in block %v: %w", resultID, blockID, err) + } + + r.logger.Debug(). + Hex("block_id", logging.ID(blockID)). + Msg("started downloading transaction error messages for block") + + req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: convert.IdentifierToMessage(blockID), + } + + resp, execNode, err := r.txErrorMessageProvider.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req) + if err != nil { + r.logger.Error().Err(err). 
+ Msgf("failed to get transaction error messages from execution nodes for blockID: %s", blockID.String()) + return nil, err + } + + errorMessages := r.convertResponse(resp, execNode) + return errorMessages, nil +} + +func (r *RequesterImpl) convertResponse( + responseMessages []*execproto.GetTransactionErrorMessagesResponse_Result, + execNode *flow.IdentitySkeleton, +) []flow.TransactionResultErrorMessage { + errorMessages := make([]flow.TransactionResultErrorMessage, 0, len(responseMessages)) + for _, value := range responseMessages { + errorMessage := flow.TransactionResultErrorMessage{ + ErrorMessage: value.ErrorMessage, + TransactionID: convert.MessageToIdentifier(value.TransactionId), + Index: value.Index, + ExecutorID: execNode.NodeID, + } + errorMessages = append(errorMessages, errorMessage) + } + + return errorMessages +} diff --git a/engine/access/ingestion/tx_error_messages/requester_test.go b/engine/access/ingestion/tx_error_messages/requester_test.go new file mode 100644 index 00000000000..f9bf53d8731 --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/requester_test.go @@ -0,0 +1,256 @@ +package tx_error_messages + +import ( + "context" + "errors" + "testing" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +type RequesterSuite struct { + suite.Suite + + log zerolog.Logger + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params + } + + receipts *storage.ExecutionReceipts + enNodeIDs flow.IdentityList + execClient *accessmock.ExecutionAPIClient + connFactory *connectionmock.ConnectionFactory + + rootBlock flow.Block + finalizedBlock *flow.Header + + txErrorMessages *storage.TransactionResultErrorMessages + lightTxResults *storage.LightTransactionResults + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex +} + +func TestRequester(t *testing.T) { + suite.Run(t, new(RequesterSuite)) +} + +func (s *RequesterSuite) SetupTest() { + s.log = unittest.Logger() + s.proto.state = protocol.NewFollowerState(s.T()) + s.proto.snapshot = protocol.NewSnapshot(s.T()) + s.proto.params = protocol.NewParams(s.T()) + s.execClient = accessmock.NewExecutionAPIClient(s.T()) + s.connFactory = connectionmock.NewConnectionFactory(s.T()) + s.receipts = storage.NewExecutionReceipts(s.T()) + + s.txErrorMessages = storage.NewTransactionResultErrorMessages(s.T()) + s.lightTxResults = storage.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + 
s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) + + s.rootBlock = unittest.BlockFixture() + s.rootBlock.Header.Height = 0 + s.finalizedBlock = unittest.BlockWithParentFixture(s.rootBlock.Header).Header + + s.proto.params.On("FinalizedRoot").Return(s.rootBlock.Header, nil) + s.proto.state.On("Params").Return(s.proto.params) + + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.finalizedBlock + }, + nil, + ).Maybe() + s.proto.state.On("Final").Return(s.proto.snapshot, nil) + + s.enNodeIDs = unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleExecution)) +} + +func (s *RequesterSuite) TestRequest_HappyPath() { + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + flow.IdentifierList{}, + s.enNodeIDs.NodeIDs(), + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + executionResult := &flow.ExecutionResult{ + BlockID: blockId, + Chunks: unittest.ChunkListFixture(1, blockId, unittest.StateCommitmentFixture()), + } + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlockWithResult(s.receipts, executionResult, s.enNodeIDs.NodeIDs()...) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := mockTransactionResultsByBlock(5) + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). + Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + config := &RequesterConfig{ + RetryDelay: 1 * time.Second, + MaxRetryDelay: 5 * time.Second, + } + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) +} + +func (s *RequesterSuite) TestRequest_ErrorCases() { + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + flow.IdentifierList{}, + s.enNodeIDs.NodeIDs(), + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + executionResult := &flow.ExecutionResult{ + BlockID: blockId, + Chunks: unittest.ChunkListFixture(1, blockId, unittest.StateCommitmentFixture()), + } + config := &RequesterConfig{ + RetryDelay: 1 * time.Second, + MaxRetryDelay: 5 * time.Second, + } + + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. 
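For reference, the retry schedule configured through `RequesterConfig` in these tests (and inside `RequesterImpl.Request`) is plain `sethvargo/go-retry` composition: exponential growth from `RetryDelay`, capped at `MaxRetryDelay`, with 15% jitter, retrying only errors wrapped in `retry.RetryableError`. A self-contained sketch follows; the delays and the failure condition here are made up for illustration.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/sethvargo/go-retry"
)

func main() {
	retryDelay := 100 * time.Millisecond // stands in for RequesterConfig.RetryDelay
	maxRetryDelay := 1 * time.Second     // stands in for RequesterConfig.MaxRetryDelay

	backoff := retry.NewExponential(retryDelay)
	backoff = retry.WithCappedDuration(maxRetryDelay, backoff)
	backoff = retry.WithJitterPercent(15, backoff)

	attempt := 0
	err := retry.Do(context.Background(), backoff, func(ctx context.Context) error {
		attempt++
		if attempt < 4 {
			// transient condition: wrapping marks the error retryable, so Do backs off and tries again
			return retry.RetryableError(errors.New("no execution nodes available yet"))
		}
		return nil // success on the fourth attempt
	})
	fmt.Println(attempt, err) // 4 <nil>
}
```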
+ setupReceiptsForBlockWithResult(s.receipts, executionResult, s.enNodeIDs.NodeIDs()...) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + + s.T().Run("Non-retryable error", func(t *testing.T) { + expectedError := errors.New("non-retryable error") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Non-retryable grpc DeadlineExceeded error", func(t *testing.T) { + expectedError := status.Error(codes.DeadlineExceeded, "deadline exceeded") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Non-retryable grpc Canceled error", func(t *testing.T) { + expectedError := status.Error(codes.Canceled, "context canceled") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Retryable ErrNoENsFoundForExecutionResult error", func(t *testing.T) { + // first time return retryable error + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, commonrpc.ErrNoENsFoundForExecutionResult). + Once() + + // second time return error messages + resultsByBlockID := mockTransactionResultsByBlock(5) + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). + Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) + }) + + s.T().Run("Retryable valid grpc error", func(t *testing.T) { + // first time return retryable error + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, status.Error(codes.NotFound, "not found")). + Once() + + // second time return error messages + resultsByBlockID := mockTransactionResultsByBlock(5) + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). 
+ Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) + }) +} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go index 12dcb906cf5..1ce681e051c 100644 --- a/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go @@ -6,7 +6,7 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" @@ -21,7 +21,7 @@ import ( type TxErrorMessagesCore struct { log zerolog.Logger // used to log relevant actions with context - backend *backend.Backend + txErrorMessageProvider error_messages.Provider transactionResultErrorMessages storage.TransactionResultErrorMessages execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider } @@ -29,20 +29,20 @@ type TxErrorMessagesCore struct { // NewTxErrorMessagesCore creates a new instance of TxErrorMessagesCore. func NewTxErrorMessagesCore( log zerolog.Logger, - backend *backend.Backend, + txErrorMessageProvider error_messages.Provider, transactionResultErrorMessages storage.TransactionResultErrorMessages, execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider, ) *TxErrorMessagesCore { return &TxErrorMessagesCore{ log: log.With().Str("module", "tx_error_messages_core").Logger(), - backend: backend, + txErrorMessageProvider: txErrorMessageProvider, transactionResultErrorMessages: transactionResultErrorMessages, execNodeIdentitiesProvider: execNodeIdentitiesProvider, } } -// HandleTransactionResultErrorMessages processes transaction result error messages for a given block ID. -// It retrieves error messages from the backend if they do not already exist in storage. +// FetchErrorMessages processes transaction result error messages for a given block ID. +// It retrieves error messages from the txErrorMessageProvider if they do not already exist in storage. // // The function first checks if error messages for the given block ID are already present in storage. // If they are not, it fetches the messages from execution nodes and stores them. @@ -52,17 +52,17 @@ func NewTxErrorMessagesCore( // - blockID: The identifier of the block for which transaction result error messages need to be processed. // // No errors are expected during normal operation. 
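The error-case tests above exercise the retry classification inside `RequesterImpl.Request`. That decision can be summarized in isolation as below; `errNoENsFound` and `classify` are illustrative stand-ins, since the real code checks `rpc.ErrNoENsFoundForExecutionResult` inline within the retry closure.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/sethvargo/go-retry"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// errNoENsFound stands in for rpc.ErrNoENsFoundForExecutionResult.
var errNoENsFound = errors.New("no execution nodes found for execution result")

// classify mirrors the retry decision in RequesterImpl.Request:
//   - "no ENs yet" is retryable (suitable nodes may appear once more receipts arrive)
//   - gRPC Canceled / DeadlineExceeded terminate the retry loop
//   - any other gRPC status error is treated as transient and retried
//   - everything else is returned as-is, which also stops the loop
func classify(ctx context.Context, err error) error {
	if errors.Is(err, errNoENsFound) {
		return retry.RetryableError(err)
	}
	if st, ok := status.FromError(err); ok {
		if st.Code() == codes.DeadlineExceeded || st.Code() == codes.Canceled {
			return errors.Join(err, ctx.Err())
		}
		return retry.RetryableError(err)
	}
	return err
}

func main() {
	ctx := context.Background()
	fmt.Println(classify(ctx, errNoENsFound))                            // retryable
	fmt.Println(classify(ctx, status.Error(codes.NotFound, "no data")))  // retryable
	fmt.Println(classify(ctx, status.Error(codes.Canceled, "canceled"))) // terminal
	fmt.Println(classify(ctx, errors.New("malformed response")))         // terminal
}
```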
-func (c *TxErrorMessagesCore) HandleTransactionResultErrorMessages(ctx context.Context, blockID flow.Identifier) error { +func (c *TxErrorMessagesCore) FetchErrorMessages(ctx context.Context, blockID flow.Identifier) error { execNodes, err := c.execNodeIdentitiesProvider.ExecutionNodesForBlockID(ctx, blockID) if err != nil { c.log.Error().Err(err).Msg(fmt.Sprintf("failed to find execution nodes for block id: %s", blockID)) return fmt.Errorf("could not find execution nodes for block: %w", err) } - return c.HandleTransactionResultErrorMessagesByENs(ctx, blockID, execNodes) + return c.FetchErrorMessagesByENs(ctx, blockID, execNodes) } -func (c *TxErrorMessagesCore) HandleTransactionResultErrorMessagesByENs( +func (c *TxErrorMessagesCore) FetchErrorMessagesByENs( ctx context.Context, blockID flow.Identifier, execNodes flow.IdentitySkeletonList, @@ -76,7 +76,7 @@ func (c *TxErrorMessagesCore) HandleTransactionResultErrorMessagesByENs( return nil } - // retrieves error messages from the backend if they do not already exist in storage + // retrieves error messages from the txErrorMessageProvider if they do not already exist in storage req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ BlockId: convert.IdentifierToMessage(blockID), } @@ -84,7 +84,7 @@ func (c *TxErrorMessagesCore) HandleTransactionResultErrorMessagesByENs( c.log.Debug(). Msgf("transaction error messages for block %s are being downloaded", blockID) - resp, execNode, err := c.backend.GetTransactionErrorMessagesFromAnyEN(ctx, execNodes, req) + resp, execNode, err := c.txErrorMessageProvider.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req) if err != nil { c.log.Error().Err(err).Msg("failed to get transaction error messages from execution nodes") return err diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go index f5bf2715bb4..cd737de69c5 100644 --- a/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go @@ -12,12 +12,15 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/engine/access/index" accessmock "github.com/onflow/flow-go/engine/access/mock" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -37,6 +40,11 @@ type TxErrorMessagesCoreSuite struct { receipts *storage.ExecutionReceipts txErrorMessages *storage.TransactionResultErrorMessages + lightTxResults *storage.LightTransactionResults + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex enNodeIDs flow.IdentityList execClient *accessmock.ExecutionAPIClient @@ -75,6 +83,13 @@ func (s *TxErrorMessagesCoreSuite) SetupTest() { s.receipts = storage.NewExecutionReceipts(s.T()) s.txErrorMessages = storage.NewTransactionResultErrorMessages(s.T()) 
+ s.lightTxResults = storage.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) + s.rootBlock = unittest.BlockFixture() s.rootBlock.Header.Height = 0 s.finalizedBlock = unittest.BlockWithParentFixture(s.rootBlock.Header).Header @@ -135,7 +150,7 @@ func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages() { Return(nil).Once() core := s.initCore() - err := core.HandleTransactionResultErrorMessages(irrecoverableCtx, blockId) + err := core.FetchErrorMessages(irrecoverableCtx, blockId) require.NoError(s.T(), err) // Verify that the mock expectations for storing the error messages were met. @@ -147,7 +162,7 @@ func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages() { s.txErrorMessages.On("Exists", blockId). Return(true, nil).Once() s.proto.state.On("AtBlockID", blockId).Return(s.proto.snapshot).Once() - err = core.HandleTransactionResultErrorMessages(irrecoverableCtx, blockId) + err = core.FetchErrorMessages(irrecoverableCtx, blockId) require.NoError(s.T(), err) // Verify that the mock expectations for storing the error messages were not met. @@ -157,7 +172,7 @@ func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages() { } // TestHandleTransactionResultErrorMessages_ErrorCases tests the error handling of -// the HandleTransactionResultErrorMessages function in the following cases: +// the FetchErrorMessages function in the following cases: // // 1. Execution node fetch error: When fetching transaction error messages from the execution node fails, // the function should return an appropriate error and no further actions should be taken. @@ -188,7 +203,7 @@ func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages_Erro Return(nil, fmt.Errorf("execution node fetch error")).Once() core := s.initCore() - err := core.HandleTransactionResultErrorMessages(irrecoverableCtx, blockId) + err := core.FetchErrorMessages(irrecoverableCtx, blockId) // Assert that the function returns an error due to the client fetch error. require.Error(s.T(), err) @@ -220,7 +235,7 @@ func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages_Erro Return(fmt.Errorf("storage error")).Once() core := s.initCore() - err := core.HandleTransactionResultErrorMessages(irrecoverableCtx, blockId) + err := core.FetchErrorMessages(irrecoverableCtx, blockId) // Assert that the function returns an error due to the store error. 
require.Error(s.T(), err) @@ -242,25 +257,18 @@ func (s *TxErrorMessagesCoreSuite) initCore() *TxErrorMessagesCore { s.enNodeIDs.NodeIDs(), ) - // Initialize the backend - backend, err := backend.New(backend.Params{ - State: s.proto.state, - ExecutionReceipts: s.receipts, - ConnFactory: s.connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, - Log: s.log, - SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly, - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, - ChainID: flow.Testnet, - ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, - }) - require.NoError(s.T(), err) + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) core := NewTxErrorMessagesCore( s.log, - backend, + errorMessageProvider, s.txErrorMessages, execNodeIdentitiesProvider, ) @@ -322,6 +330,23 @@ func setupReceiptsForBlock(receipts *storage.ExecutionReceipts, block *flow.Bloc }, nil) } +// setupReceiptsForBlockWithResult sets up mock execution receipts for a block with a specific execution result +func setupReceiptsForBlockWithResult(receipts *storage.ExecutionReceipts, executionResult *flow.ExecutionResult, executorIDs ...flow.Identifier) { + receiptList := make(flow.ExecutionReceiptList, 0, len(executorIDs)) + for _, enID := range executorIDs { + receiptList = append(receiptList, unittest.ExecutionReceiptFixture( + unittest.WithResult(executionResult), + unittest.WithExecutorID(enID), + )) + } + + receipts. + On("ByBlockID", executionResult.BlockID). + Return(func(flow.Identifier) flow.ExecutionReceiptList { + return receiptList + }, nil) +} + // createTransactionErrorMessagesResponse create TransactionErrorMessagesResponse from execution node based on results. func createTransactionErrorMessagesResponse(resultsByBlockID []flow.LightTransactionResult) *execproto.GetTransactionErrorMessagesResponse { exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go index 036cd70c0df..78ea806e8be 100644 --- a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go @@ -174,7 +174,7 @@ func (e *Engine) processErrorMessagesForBlock(ctx context.Context, blockID flow. 
} attempt++ - err := e.txErrorMessagesCore.HandleTransactionResultErrorMessages(ctx, blockID) + err := e.txErrorMessagesCore.FetchErrorMessages(ctx, blockID) return retry.RetryableError(err) }) diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go index a86e1943bb1..330d4f97c7a 100644 --- a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go @@ -15,13 +15,16 @@ import ( "github.com/stretchr/testify/suite" hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/engine/access/index" accessmock "github.com/onflow/flow-go/engine/access/mock" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/irrecoverable" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/storage/operation/badgerimpl" @@ -45,6 +48,11 @@ type TxErrorMessagesEngineSuite struct { headers *storage.Headers receipts *storage.ExecutionReceipts txErrorMessages *storage.TransactionResultErrorMessages + lightTxResults *storage.LightTransactionResults + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex enNodeIDs flow.IdentityList execClient *accessmock.ExecutionAPIClient @@ -86,6 +94,12 @@ func (s *TxErrorMessagesEngineSuite) SetupTest() { s.headers = storage.NewHeaders(s.T()) s.receipts = storage.NewExecutionReceipts(s.T()) s.txErrorMessages = storage.NewTransactionResultErrorMessages(s.T()) + s.lightTxResults = storage.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) blockCount := 5 s.blockMap = make(map[uint64]*flow.Block, blockCount) @@ -145,26 +159,18 @@ func (s *TxErrorMessagesEngineSuite) initEngine(ctx irrecoverable.SignalerContex flow.IdentifierList{}, ) - // Initialize the backend with the mocked state, blocks, headers, transactions, etc. 
- backend, err := backend.New(backend.Params{ - State: s.proto.state, - Headers: s.headers, - ExecutionReceipts: s.receipts, - ConnFactory: s.connFactory, - MaxHeightRange: backend.DefaultMaxHeightRange, - Log: s.log, - SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, - Communicator: backend.NewNodeCommunicator(false), - ScriptExecutionMode: backend.IndexQueryModeExecutionNodesOnly, - TxResultQueryMode: backend.IndexQueryModeExecutionNodesOnly, - ChainID: flow.Testnet, - ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, - }) - require.NoError(s.T(), err) + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) txResultErrorMessagesCore := NewTxErrorMessagesCore( s.log, - backend, + errorMessageProvider, s.txErrorMessages, execNodeIdentitiesProvider, ) diff --git a/engine/access/ingestion2/collection_syncer.go b/engine/access/ingestion2/collection_syncer.go new file mode 100644 index 00000000000..376737c1077 --- /dev/null +++ b/engine/access/ingestion2/collection_syncer.go @@ -0,0 +1,475 @@ +package ingestion2 + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +const ( + // time to wait for the all the missing collections to be received at node startup + collectionCatchupTimeout = 30 * time.Second + + // time to poll the storage to check if missing collections have been received + collectionCatchupDBPollInterval = 10 * time.Millisecond + + // time to request missing collections from the network + missingCollsRequestInterval = 1 * time.Minute + + // a threshold of number of blocks with missing collections beyond which collections should be re-requested + // this is to prevent spamming the collection nodes with request + missingCollsForBlockThreshold = 100 + + // a threshold of block height beyond which collections should be re-requested (regardless of the number of blocks for which collection are missing) + // this is to ensure that if a collection is missing for a long time (in terms of block height) it is eventually re-requested + missingCollsForAgeThreshold = 100 + + // time to update the FullBlockHeight index + fullBlockRefreshInterval = 1 * time.Second +) + +var ( + // we change these values in tests. that's why we want to have their non-const shape + defaultMissingCollsForBlockThreshold = missingCollsForBlockThreshold + defaultMissingCollsForAgeThreshold uint64 = missingCollsForAgeThreshold +) + +// The CollectionSyncer type provides mechanisms for syncing and indexing data +// from the Flow blockchain into local storage. Specifically, it handles +// the retrieval and processing of collections and transactions that may +// have been missed due to network delays, restarts, or gaps in finalization. +// +// It is responsible for ensuring the local node has +// all collections associated with finalized blocks starting from the +// last fully synced height. 
It works by periodically scanning the finalized +// block range, identifying missing collections, and triggering requests +// to fetch them from the network. Once collections are retrieved, it +// ensures they are persisted in the local collection and transaction stores. +// +// The syncer maintains a persistent, strictly monotonic counter +// (`lastFullBlockHeight`) to track the highest finalized block for which +// all collections have been fully indexed. It uses this information to +// avoid redundant processing and to measure catch-up progress. +// +// It is meant to operate in a background goroutine as part of the +// node's ingestion pipeline. +type CollectionSyncer struct { + logger zerolog.Logger + collectionExecutedMetric module.CollectionExecutedMetric + + state protocol.State + requester module.Requester + + // collections to be indexed + pendingCollections *engine.FifoMessageStore + pendingCollectionsHandler *engine.MessageHandler + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager storage.LockManager +} + +// NewCollectionSyncer creates a new CollectionSyncer responsible for requesting, +// tracking, and indexing missing collections. +func NewCollectionSyncer( + logger zerolog.Logger, + collectionExecutedMetric module.CollectionExecutedMetric, + requester module.Requester, + state protocol.State, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter, + lockManager storage.LockManager, +) (*CollectionSyncer, error) { + collectionExecutedMetric.UpdateLastFullBlockHeight(lastFullBlockHeight.Value()) + + collectionsQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) + if err != nil { + return nil, fmt.Errorf("could not create collections queue: %w", err) + } + + pendingCollections := &engine.FifoMessageStore{FifoQueue: collectionsQueue} + pendingCollectionsHandler := engine.NewMessageHandler( + logger, + engine.NewNotifier(), + engine.Pattern{ + Match: func(msg *engine.Message) bool { + _, ok := msg.Payload.(*flow.Collection) + return ok + }, + Store: pendingCollections, + }, + ) + + return &CollectionSyncer{ + logger: logger, + state: state, + requester: requester, + pendingCollectionsHandler: pendingCollectionsHandler, + pendingCollections: pendingCollections, + blocks: blocks, + collections: collections, + transactions: transactions, + lastFullBlockHeight: lastFullBlockHeight, + collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, + }, nil +} + +// StartWorkerLoop continuously monitors and triggers collection sync operations. +// It handles on startup collection catchup, periodic missing collection requests, and full block height updates. +func (s *CollectionSyncer) StartWorkerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + requestCtx, cancel := context.WithTimeout(ctx, collectionCatchupTimeout) + defer cancel() + + // on start-up, AN wants to download all missing collections to serve it to end users + err := s.requestMissingCollectionsBlocking(requestCtx) + if err != nil { + s.logger.Error().Err(err).Msg("error downloading missing collections") + } + ready() + + requestCollectionsTicker := time.NewTicker(missingCollsRequestInterval) + defer requestCollectionsTicker.Stop() + + // Collections are requested concurrently in this design. 
+ // To maintain accurate progress tracking and avoid redundant requests, + // we periodically update the `lastFullBlockHeight` to reflect the latest + // finalized block with all collections successfully indexed. + updateLastFullBlockHeightTicker := time.NewTicker(fullBlockRefreshInterval) + defer updateLastFullBlockHeightTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case <-requestCollectionsTicker.C: + err := s.requestMissingCollections() + if err != nil { + ctx.Throw(err) + } + + case <-updateLastFullBlockHeightTicker.C: + err := s.updateLastFullBlockHeight() + if err != nil { + ctx.Throw(err) + } + + case <-s.pendingCollectionsHandler.GetNotifier(): + msg, ok := s.pendingCollections.Get() + if !ok { + ctx.Throw(fmt.Errorf("could not get pending collection")) + } + + collection, ok := msg.Payload.(*flow.Collection) + if !ok { + ctx.Throw(fmt.Errorf("could not cast pending collection to *flow.Collection. got: %T", msg.Payload)) + return + } + + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + ctx.Throw(fmt.Errorf("could not acquire lock for collection indexing: %w", err)) + return + } + defer lctx.Release() + + err = indexer.IndexCollection(lctx, collection, s.collections, s.logger, s.collectionExecutedMetric) + if err != nil { + ctx.Throw(fmt.Errorf("error indexing collection: %w", err)) + return + } + } + } +} + +// requestMissingCollections checks if missing collections should be requested based on configured +// block or age thresholds and triggers requests if needed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollections() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + collections, incompleteBlocksCount, err := s.findMissingCollections(lastFullBlockHeight) + if err != nil { + return err + } + + blocksThresholdReached := incompleteBlocksCount >= defaultMissingCollsForBlockThreshold + ageThresholdReached := lastFinalizedBlock.Height-lastFullBlockHeight > defaultMissingCollsForAgeThreshold + shouldRequest := blocksThresholdReached || ageThresholdReached + + if shouldRequest { + // warn log since generally this should not happen + s.logger.Warn(). + Uint64("finalized_height", lastFinalizedBlock.Height). + Uint64("last_full_blk_height", lastFullBlockHeight). + Int("missing_collection_blk_count", incompleteBlocksCount). + Int("missing_collection_count", len(collections)). + Msg("re-requesting missing collections") + + s.requestCollections(collections, false) + } + + return nil +} + +// requestMissingCollectionsBlocking requests and waits for all missing collections to be downloaded, +// blocking until either completion or context timeout. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollectionsBlocking(ctx context.Context) error { + missingCollections, _, err := s.findMissingCollections(s.lastFullBlockHeight.Value()) + if err != nil { + return err + } + if len(missingCollections) == 0 { + s.logger.Info().Msg("skipping requesting missing collections. 
no missing collections found") + return nil + } + + s.requestCollections(missingCollections, true) + + collectionsToBeDownloaded := make(map[flow.Identifier]struct{}) + for _, collection := range missingCollections { + collectionsToBeDownloaded[collection.CollectionID] = struct{}{} + } + + collectionStoragePollTicker := time.NewTicker(collectionCatchupDBPollInterval) + defer collectionStoragePollTicker.Stop() + + // we want to wait for all collections to be downloaded so we poll local storage periodically to make sure each + // collection was successfully saved in the storage. + for len(collectionsToBeDownloaded) > 0 { + select { + case <-ctx.Done(): + return fmt.Errorf("failed to complete collection retrieval: %w", ctx.Err()) + + case <-collectionStoragePollTicker.C: + s.logger.Info(). + Int("total_missing_collections", len(collectionsToBeDownloaded)). + Msg("retrieving missing collections...") + + for collectionID := range collectionsToBeDownloaded { + downloaded, err := s.isCollectionInStorage(collectionID) + if err != nil { + return err + } + + if downloaded { + delete(collectionsToBeDownloaded, collectionID) + } + } + } + } + + s.logger.Info().Msg("collection catchup done") + return nil +} + +// findMissingCollections scans block heights from last known full block up to the latest finalized +// block and returns all missing collection along with the count of incomplete blocks. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollections(lastFullBlockHeight uint64) ([]*flow.CollectionGuarantee, int, error) { + // first block to look up collections at + firstBlockHeight := lastFullBlockHeight + 1 + + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return nil, 0, fmt.Errorf("failed to get finalized block: %w", err) + } + // last block to look up collections at + lastBlockHeight := lastFinalizedBlock.Height + + var missingCollections []*flow.CollectionGuarantee + var incompleteBlocksCount int + + for currBlockHeight := firstBlockHeight; currBlockHeight <= lastBlockHeight; currBlockHeight++ { + collections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return nil, 0, err + } + + if len(collections) == 0 { + continue + } + + missingCollections = append(missingCollections, collections...) + incompleteBlocksCount += 1 + } + + return missingCollections, incompleteBlocksCount, nil +} + +// findMissingCollectionsAtHeight returns all missing collections for a specific block height. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollectionsAtHeight(height uint64) ([]*flow.CollectionGuarantee, error) { + block, err := s.blocks.ByHeight(height) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block by height %d: %w", height, err) + } + + var missingCollections []*flow.CollectionGuarantee + for _, guarantee := range block.Payload.Guarantees { + inStorage, err := s.isCollectionInStorage(guarantee.CollectionID) + if err != nil { + return nil, err + } + + if !inStorage { + missingCollections = append(missingCollections, guarantee) + } + } + + return missingCollections, nil +} + +// isCollectionInStorage checks whether the given collection is present in local storage. +// +// No errors are expected during normal operations. 
+func (s *CollectionSyncer) isCollectionInStorage(collectionID flow.Identifier) (bool, error) { + _, err := s.collections.LightByID(collectionID) + if err == nil { + return true, nil + } + + if errors.Is(err, storage.ErrNotFound) { + return false, nil + } + + return false, fmt.Errorf("failed to retrieve collection %s: %w", collectionID.String(), err) +} + +// RequestCollectionsForBlock conditionally requests missing collections for a specific block height, +// skipping requests if the block is already below the known full block height. +func (s *CollectionSyncer) RequestCollectionsForBlock(height uint64, missingCollections []*flow.CollectionGuarantee) { + // skip requesting collections, if this block is below the last full block height. + // this means that either we have already received these collections, or the block + // may contain unverifiable guarantees (in case this node has just joined the network) + if height <= s.lastFullBlockHeight.Value() { + s.logger.Debug(). + Msg("skipping requesting collections for finalized block as its collections have been already retrieved") + return + } + + s.requestCollections(missingCollections, false) +} + +// requestCollections registers collection download requests in the requester engine, +// optionally forcing immediate dispatch. +func (s *CollectionSyncer) requestCollections(collections []*flow.CollectionGuarantee, immediately bool) { + for _, collection := range collections { + guarantors, err := protocol.FindGuarantors(s.state, collection) + if err != nil { + // failed to find guarantors for guarantees contained in a finalized block is fatal error + s.logger.Fatal().Err(err).Msgf("could not find guarantors for guarantee %v", collection.ID()) + } + s.requester.EntityByID(collection.ID(), filter.HasNodeID[flow.Identity](guarantors...)) + } + + if immediately { + s.requester.Force() + } +} + +// updateLastFullBlockHeight updates the next highest block height where all previous collections have been indexed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) updateLastFullBlockHeight() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + // track the latest contiguous full height + newLastFullBlockHeight, err := s.findLowestBlockHeightWithMissingCollections(lastFullBlockHeight, lastFinalizedBlock.Height) + if err != nil { + return fmt.Errorf("failed to find last full block height: %w", err) + } + + // if more contiguous blocks are now complete, update db + if newLastFullBlockHeight > lastFullBlockHeight { + err := s.lastFullBlockHeight.Set(newLastFullBlockHeight) + if err != nil { + return fmt.Errorf("failed to update last full block height: %w", err) + } + + s.collectionExecutedMetric.UpdateLastFullBlockHeight(newLastFullBlockHeight) + + s.logger.Debug(). + Uint64("last_full_block_height", newLastFullBlockHeight). + Msg("updated last full block height counter") + } + + return nil +} + +// findLowestBlockHeightWithMissingCollections finds the next block height with missing collections, +// returning the latest contiguous height where all collections are present. +// +// No errors are expected during normal operations. 
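`updateLastFullBlockHeight` advances the counter only across a contiguous prefix of complete blocks, using the helper defined just below. A simplified sketch of that advancement rule, with a hypothetical `blockIsComplete` callback in place of the storage lookups:

```go
package main

import "fmt"

// advanceLastFullHeight mirrors updateLastFullBlockHeight together with
// findLowestBlockHeightWithMissingCollections: starting at the current last-full
// height, it walks finalized heights upward and stops at the first block that
// still has missing collections, returning the last contiguous complete height.
func advanceLastFullHeight(
	lastFull, finalized uint64,
	blockIsComplete func(height uint64) bool,
) uint64 {
	newLastFull := lastFull
	for h := lastFull + 1; h <= finalized; h++ {
		if !blockIsComplete(h) {
			return newLastFull // the first gap stops the advancement
		}
		newLastFull = h
	}
	return newLastFull
}

func main() {
	complete := map[uint64]bool{101: true, 102: true, 104: true} // 103 still missing collections
	isComplete := func(h uint64) bool { return complete[h] }
	fmt.Println(advanceLastFullHeight(100, 104, isComplete)) // 102: 104 is complete, but 103 blocks the advance
}
```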
+func (s *CollectionSyncer) findLowestBlockHeightWithMissingCollections(
+	lastKnownFullBlockHeight uint64,
+	finalizedBlockHeight uint64,
+) (uint64, error) {
+	newLastFullBlockHeight := lastKnownFullBlockHeight
+
+	for currBlockHeight := lastKnownFullBlockHeight + 1; currBlockHeight <= finalizedBlockHeight; currBlockHeight++ {
+		missingCollections, err := s.findMissingCollectionsAtHeight(currBlockHeight)
+		if err != nil {
+			return 0, err
+		}
+
+		// return when we find the first block with missing collections
+		if len(missingCollections) > 0 {
+			return newLastFullBlockHeight, nil
+		}
+
+		newLastFullBlockHeight = currBlockHeight
+	}
+
+	return newLastFullBlockHeight, nil
+}
+
+// OnCollectionDownloaded enqueues a downloaded collection for indexing and persistence.
+// This function is a callback intended to be used by the requester engine.
+func (s *CollectionSyncer) OnCollectionDownloaded(id flow.Identifier, entity flow.Entity) {
+	err := s.pendingCollectionsHandler.Process(id, entity)
+	if err != nil {
+		// This is an unexpected error condition. The only expected error returned from Process
+		// is for an unexpected type. Since OnCollectionDownloaded is called from the requester engine,
+		// which is configured to only process collections, any error returned here indicates
+		// a bug or state corruption.
+		s.logger.Fatal().Err(err).Msg("failed to process pending collections")
+		return
+	}
+}
diff --git a/engine/access/ingestion2/engine.go b/engine/access/ingestion2/engine.go
new file mode 100644
index 00000000000..3c8e42e2fde
--- /dev/null
+++ b/engine/access/ingestion2/engine.go
@@ -0,0 +1,182 @@
+// Package ingestion2 implements a modular ingestion engine responsible for
+// orchestrating the processing of finalized blockchain data and receiving
+// execution receipts from the network.
+//
+// The Engine coordinates several internal workers, each dedicated to a specific task:
+// - Receiving and persisting execution receipts from the network.
+// - Subscribing to finalized block events.
+// - Synchronizing collections associated with finalized blocks.
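The package doc above describes an engine composed of several dedicated workers. The wiring pattern, reduced to a single illustrative worker and using the same `component`/`irrecoverable` calls that appear in this diff, is sketched below; the package and type names here are made up.

```go
package ingestion2sketch

import (
	"time"

	"github.com/onflow/flow-go/module/component"
	"github.com/onflow/flow-go/module/irrecoverable"
)

// sketchEngine shows the worker-wiring pattern used by the ingestion2 Engine:
// each concern runs as its own worker under one ComponentManager, signals
// readiness, and reacts to shutdown via the signaler context.
type sketchEngine struct {
	*component.ComponentManager
}

func newSketchEngine() *sketchEngine {
	e := &sketchEngine{}
	e.ComponentManager = component.NewComponentManagerBuilder().
		AddWorker(e.tickerWorker).
		Build()
	return e
}

func (e *sketchEngine) tickerWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
	ready() // report readiness once initialization is done

	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return // shutdown requested
		case <-tick.C:
			// periodic work goes here; an unexpected failure would be
			// escalated with ctx.Throw(err)
		}
	}
}
```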
+package ingestion2 + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/storage" +) + +// defaultQueueCapacity is a capacity for the execution receipt message queue +const defaultQueueCapacity = 10_000 + +type Engine struct { + *component.ComponentManager + + log zerolog.Logger + + finalizedBlockProcessor *FinalizedBlockProcessor + collectionSyncer *CollectionSyncer + + messageHandler *engine.MessageHandler + executionReceiptsQueue *engine.FifoMessageStore + receipts storage.ExecutionReceipts + collectionExecutedMetric module.CollectionExecutedMetric +} + +var _ network.MessageProcessor = (*Engine)(nil) + +func New( + log zerolog.Logger, + net network.EngineRegistry, + finalizedBlockProcessor *FinalizedBlockProcessor, + collectionSyncer *CollectionSyncer, + receipts storage.ExecutionReceipts, + collectionExecutedMetric module.CollectionExecutedMetric, +) (*Engine, error) { + executionReceiptsRawQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) + if err != nil { + return nil, fmt.Errorf("could not create execution receipts queue: %w", err) + } + executionReceiptsQueue := &engine.FifoMessageStore{FifoQueue: executionReceiptsRawQueue} + messageHandler := engine.NewMessageHandler( + log, + engine.NewNotifier(), + engine.Pattern{ + Match: func(msg *engine.Message) bool { + _, ok := msg.Payload.(*flow.ExecutionReceipt) + return ok + }, + Store: executionReceiptsQueue, + }, + ) + + e := &Engine{ + log: log.With().Str("engine", "ingestion2").Logger(), + finalizedBlockProcessor: finalizedBlockProcessor, + collectionSyncer: collectionSyncer, + messageHandler: messageHandler, + executionReceiptsQueue: executionReceiptsQueue, + receipts: receipts, + collectionExecutedMetric: collectionExecutedMetric, + } + + // register our workers which are basically consumers of different kinds of data. + // engine notifies workers when new data is available so that they can start processing them. + builder := component.NewComponentManagerBuilder(). + AddWorker(e.messageHandlerLoop). + AddWorker(e.finalizedBlockProcessor.StartWorkerLoop). + AddWorker(e.collectionSyncer.StartWorkerLoop) + e.ComponentManager = builder.Build() + + // engine gets execution receipts from channels.ReceiveReceipts channel + _, err = net.Register(channels.ReceiveReceipts, e) + if err != nil { + return nil, fmt.Errorf("could not register engine in network to receive execution receipts: %w", err) + } + + return e, nil +} + +// Process processes the given event from the node with the given origin ID in +// a blocking manner. It returns the potential processing error when done. +// +// No errors are expected during normal operations. 
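`New` above wires a bounded FIFO queue behind a message handler: `Process` only enqueues and signals a notifier, and a worker drains the queue. A dependency-free sketch of that enqueue/notify/drain pattern follows; all names here are hypothetical.

```go
package main

import (
	"fmt"
	"sync"
)

// notifyQueue is a plain-Go sketch of the fifoqueue + MessageHandler pattern:
// producers enqueue and signal a notifier; a single worker drains the queue each
// time it is woken up, so network handlers never block on processing.
type notifyQueue struct {
	mu     sync.Mutex
	items  []string
	notify chan struct{} // capacity 1: coalesces multiple notifications
}

func newNotifyQueue() *notifyQueue {
	return &notifyQueue{notify: make(chan struct{}, 1)}
}

func (q *notifyQueue) push(item string) {
	q.mu.Lock()
	q.items = append(q.items, item)
	q.mu.Unlock()
	select {
	case q.notify <- struct{}{}:
	default: // a wake-up is already pending
	}
}

func (q *notifyQueue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

func main() {
	q := newNotifyQueue()
	q.push("receipt-1")
	q.push("receipt-2")

	<-q.notify // the worker wakes up once...
	for {      // ...and drains everything that is queued
		item, ok := q.pop()
		if !ok {
			break
		}
		fmt.Println("processing", item)
	}
}
```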
+func (e *Engine) Process(chanName channels.Channel, originID flow.Identifier, event interface{}) error { + select { + case <-e.ComponentManager.ShutdownSignal(): + return component.ErrComponentShutdown + default: + } + + //TODO: we don't need this type switch as message handler has this check under the hood + switch event.(type) { + case *flow.ExecutionReceipt: + err := e.messageHandler.Process(originID, event) + return err + default: + return fmt.Errorf("got invalid event type (%T) from %s channel", event, chanName) + } +} + +// messageHandlerLoop reacts to message handler notifications and processes available execution receipts +// once notification has arrived. +func (e *Engine) messageHandlerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + for { + select { + case <-ctx.Done(): + return + case <-e.messageHandler.GetNotifier(): + err := e.processAvailableExecutionReceipts(ctx) + if err != nil { + // if an error reaches this point, it is unexpected + ctx.Throw(err) + return + } + } + } +} + +// processAvailableExecutionReceipts processes available execution receipts in the queue and handles it. +// It continues processing until all enqueued receipts are handled or the context is canceled. +// +// No errors are expected during normal operations. +func (e *Engine) processAvailableExecutionReceipts(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return nil + default: + } + msg, ok := e.executionReceiptsQueue.Get() + if !ok { + return nil + } + + receipt := msg.Payload.(*flow.ExecutionReceipt) + if err := e.persistExecutionReceipt(receipt); err != nil { + return err + } + } +} + +// persistExecutionReceipt persists the execution receipt. +// +// No errors are expected during normal operations. +func (e *Engine) persistExecutionReceipt(receipt *flow.ExecutionReceipt) error { + // persist the execution receipt locally, storing will also index the receipt + err := e.receipts.Store(receipt) + if err != nil { + return fmt.Errorf("failed to store execution receipt: %w", err) + } + + e.collectionExecutedMetric.ExecutionReceiptReceived(receipt) + return nil +} + +// OnFinalizedBlock is called by the follower engine after a block has been finalized and the state has been updated. +// Receives block finalized events from the finalization distributor and forwards them to the consumer. 
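The guard at the top of `Process` above uses a select with a default case so that checking the shutdown signal never blocks the networking layer. In isolation the idiom looks like this; the function name is made up.

```go
package main

import "fmt"

// nonBlockingShutdownCheck sketches the guard at the top of Engine.Process:
// the default case turns "is the shutdown signal closed?" into a non-blocking
// check, so new work is rejected immediately after shutdown without blocking.
func nonBlockingShutdownCheck(shutdown <-chan struct{}) error {
	select {
	case <-shutdown:
		return fmt.Errorf("component is shutting down")
	default:
		return nil
	}
}

func main() {
	shutdown := make(chan struct{})
	fmt.Println(nonBlockingShutdownCheck(shutdown)) // <nil> — still running
	close(shutdown)
	fmt.Println(nonBlockingShutdownCheck(shutdown)) // error — shutdown signalled
}
```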
+func (e *Engine) OnFinalizedBlock(_ *model.Block) { + e.finalizedBlockProcessor.Notify() +} diff --git a/engine/access/ingestion2/engine_test.go b/engine/access/ingestion2/engine_test.go new file mode 100644 index 00000000000..4f2a78b0720 --- /dev/null +++ b/engine/access/ingestion2/engine_test.go @@ -0,0 +1,830 @@ +package ingestion2 + +import ( + "context" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + downloadermock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" + modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/mocknetwork" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storerr "github.com/onflow/flow-go/storage" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +type Suite struct { + suite.Suite + + // protocol state + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params + } + + me *modulemock.Local + net *mocknetwork.Network + request *modulemock.Requester + obsIdentity *flow.Identity + provider *mocknetwork.Engine + blocks *storage.Blocks + headers *storage.Headers + collections *storage.Collections + transactions *storage.Transactions + receipts *storage.ExecutionReceipts + results *storage.ExecutionResults + seals *storage.Seals + + conduit *mocknetwork.Conduit + downloader *downloadermock.Downloader + sealedBlock *flow.Header + finalizedBlock *flow.Header + log zerolog.Logger + blockMap map[uint64]*flow.Block + rootBlock flow.Block + + collectionExecutedMetric *indexer.CollectionExecutedMetricImpl + + ctx context.Context + cancel context.CancelFunc + + db *badger.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager lockctx.Manager +} + +func TestIngestEngine(t *testing.T) { + suite.Run(t, new(Suite)) +} + +// TearDownTest stops the engine and cleans up the db +func (s *Suite) TearDownTest() { + s.cancel() + err := os.RemoveAll(s.dbDir) + s.Require().NoError(err) +} + +func (s *Suite) SetupTest() { + s.log = unittest.Logger() + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.db, s.dbDir = unittest.TempBadgerDB(s.T()) + s.lockManager = storerr.NewTestingLockManager() + + s.obsIdentity = unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + + s.blocks = storage.NewBlocks(s.T()) + // mock out protocol state + s.proto.state = new(protocol.FollowerState) + s.proto.snapshot = new(protocol.Snapshot) + s.proto.params = new(protocol.Params) + s.finalizedBlock = 
unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) + s.proto.state.On("Identity").Return(s.obsIdentity, nil) + s.proto.state.On("Params").Return(s.proto.params) + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.finalizedBlock + }, + nil, + ).Maybe() + + s.me = modulemock.NewLocal(s.T()) + s.me.On("NodeID").Return(s.obsIdentity.NodeID).Maybe() + s.net = mocknetwork.NewNetwork(s.T()) + conduit := mocknetwork.NewConduit(s.T()) + s.net.On("Register", channels.ReceiveReceipts, mock.Anything). + Return(conduit, nil). + Once() + s.request = modulemock.NewRequester(s.T()) + s.provider = mocknetwork.NewEngine(s.T()) + s.blocks = storage.NewBlocks(s.T()) + s.headers = storage.NewHeaders(s.T()) + s.collections = new(storage.Collections) + s.receipts = new(storage.ExecutionReceipts) + s.transactions = new(storage.Transactions) + s.results = new(storage.ExecutionResults) + collectionsToMarkFinalized, err := stdmap.NewTimes(100) + require.NoError(s.T(), err) + collectionsToMarkExecuted, err := stdmap.NewTimes(100) + require.NoError(s.T(), err) + blocksToMarkExecuted, err := stdmap.NewTimes(100) + require.NoError(s.T(), err) + blockTransactions, err := stdmap.NewIdentifierMap(100) + require.NoError(s.T(), err) + + s.proto.state.On("Identity").Return(s.obsIdentity, nil) + s.proto.state.On("Params").Return(s.proto.params) + + blockCount := 5 + s.blockMap = make(map[uint64]*flow.Block, blockCount) + s.rootBlock = unittest.BlockFixture() + s.rootBlock.Header.Height = 0 + parent := s.rootBlock.Header + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.Header + s.blockMap[block.Header.Height] = block + } + s.finalizedBlock = parent + + s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return( + mocks.ConvertStorageOutput( + mocks.StorageMapGetter(s.blockMap), + func(block *flow.Block) *flow.Block { return block }, + ), + ).Maybe() + + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.finalizedBlock + }, + nil, + ).Maybe() + s.proto.state.On("Final").Return(s.proto.snapshot, nil) + + // Mock the finalized root block header with height 0. + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) + s.proto.params.On("FinalizedRoot").Return(header, nil) + + s.collectionExecutedMetric, err = indexer.NewCollectionExecutedMetricImpl( + s.log, + metrics.NewNoopCollector(), + collectionsToMarkFinalized, + collectionsToMarkExecuted, + blocksToMarkExecuted, + s.collections, + s.blocks, + blockTransactions, + ) + require.NoError(s.T(), err) +} + +func (s *Suite) TestComponentShutdown() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + // start then shut down the engine + unittest.AssertClosesBefore(s.T(), eng.Ready(), 10*time.Millisecond) + s.cancel() + unittest.AssertClosesBefore(s.T(), eng.Done(), 10*time.Millisecond) + + err := eng.Process(channels.ReceiveReceipts, unittest.IdentifierFixture(), &flow.ExecutionReceipt{}) + s.Assert().ErrorIs(err, component.ErrComponentShutdown) +} + +// initEngineAndSyncer create new instance of ingestion engine and collection collectionSyncer. +// It waits until the ingestion engine starts. 
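The suite above backs the Blocks mock's `ByHeight` expectation with a height-to-block map, so tests can extend the chain simply by adding entries and moving the finalized head. A stripped-down version of that stubbing approach, with hypothetical types in place of the flow-go storage interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

type header struct{ height uint64 }

// blocksStub is a map-backed stand-in for the mocked Blocks storage: lookups go
// through the map registered once at setup, so the "chain" grows by mutation.
type blocksStub struct {
	byHeight map[uint64]*header
}

var errNotFound = errors.New("not found") // stands in for storage.ErrNotFound

func (b *blocksStub) ByHeight(h uint64) (*header, error) {
	blk, ok := b.byHeight[h]
	if !ok {
		return nil, errNotFound
	}
	return blk, nil
}

func main() {
	stub := &blocksStub{byHeight: map[uint64]*header{0: {height: 0}}}

	// "finalize" a new block by extending the backing map
	stub.byHeight[1] = &header{height: 1}

	blk, err := stub.ByHeight(1)
	fmt.Println(blk.height, err) // 1 <nil>
	_, err = stub.ByHeight(2)
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```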
+func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, *CollectionSyncer) { + processedHeightInitializer := store.NewConsumerProgress(badgerimpl.ToDB(s.db), module.ConsumeProgressIngestionEngineBlockHeight) + + lastFullBlockHeight, err := store.NewConsumerProgress(badgerimpl.ToDB(s.db), module.ConsumeProgressLastFullBlockHeight).Initialize(s.finalizedBlock.Height) + require.NoError(s.T(), err) + + s.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeight) + require.NoError(s.T(), err) + + syncer, err := NewCollectionSyncer( + s.log, + s.collectionExecutedMetric, + module.Requester(s.request), + s.proto.state, + s.blocks, + s.collections, + s.transactions, + s.lastFullBlockHeight, + s.lockManager, + ) + require.NoError(s.T(), err) + + blockProcessor, err := NewFinalizedBlockProcessor( + s.log, + s.proto.state, + s.blocks, + s.results, + processedHeightInitializer, + syncer, + s.collectionExecutedMetric, + ) + require.NoError(s.T(), err) + + eng, err := New( + s.log, + s.net, + blockProcessor, + syncer, + s.receipts, + s.collectionExecutedMetric, + ) + + require.NoError(s.T(), err) + + eng.ComponentManager.Start(ctx) + <-eng.Ready() + + return eng, syncer +} + +// mockCollectionsForBlock mocks collections for block +func (s *Suite) mockCollectionsForBlock(block flow.Block) { + // we should query the block once and index the guarantee payload once + for _, g := range block.Payload.Guarantees { + collection := unittest.CollectionFixture(1) + light := collection.Light() + s.collections.On("LightByID", g.CollectionID).Return(&light, nil).Twice() + } +} + +// generateBlock prepares block with payload and specified guarantee.SignerIndices +func (s *Suite) generateBlock(clusterCommittee flow.IdentitySkeletonList, snap *protocol.Snapshot) flow.Block { + block := unittest.BlockFixture() + block.SetPayload(unittest.PayloadFixture( + unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...), + unittest.WithExecutionResults(unittest.ExecutionResultFixture()), + unittest.WithSeals(unittest.Seal.Fixture()), + )) + + refBlockID := unittest.IdentifierFixture() + for _, guarantee := range block.Payload.Guarantees { + guarantee.ReferenceBlockID = refBlockID + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + guarantee.SignerIndices = indices + } + + s.proto.state.On("AtBlockID", refBlockID).Return(snap) + + return block +} + +// TestOnFinalizedBlock checks that when a block is received, a request for each individual collection is made +func (s *Suite) TestOnFinalizedBlockSingle() { + cluster := protocol.NewCluster(s.T()) + epoch := protocol.NewCommittedEpoch(s.T()) + epochs := protocol.NewEpochQuery(s.T()) + snap := protocol.NewSnapshot(s.T()) + + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs.On("Current").Return(epoch, nil) + snap.On("Epochs").Return(epochs) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + cluster.On("Members").Return(clusterCommittee, nil) + + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + block := s.generateBlock(clusterCommittee, snap) + 
block.Header.Height = s.finalizedBlock.Height + 1 + s.blockMap[block.Header.Height] = &block + s.mockCollectionsForBlock(block) + s.finalizedBlock = block.Header + + hotstuffBlock := hotmodel.Block{ + BlockID: block.ID(), + } + + // expect that the block storage is indexed with each of the collection guarantee + s.blocks.On("IndexBlockForCollections", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + for _, seal := range block.Payload.Seals { + s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() + } + + missingCollectionCount := 4 + wg := sync.WaitGroup{} + wg.Add(missingCollectionCount) + + for _, cg := range block.Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { + // Ensure the test does not complete its work faster than necessary + wg.Done() + }).Once() + } + + // process the block through the finalized callback + eng.OnFinalizedBlock(&hotstuffBlock) + + unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process new block before timeout") + + // assert that the block was retrieved and all collections were requested + s.headers.AssertExpectations(s.T()) + s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees)) + s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals)) +} + +// TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed +func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { + cluster := protocol.NewCluster(s.T()) + epoch := protocol.NewCommittedEpoch(s.T()) + epochs := protocol.NewEpochQuery(s.T()) + snap := protocol.NewSnapshot(s.T()) + + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs.On("Current").Return(epoch, nil) + snap.On("Epochs").Return(epochs) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + cluster.On("Members").Return(clusterCommittee, nil) + + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + newBlocksCount := 3 + startHeight := s.finalizedBlock.Height + 1 + blocks := make([]flow.Block, newBlocksCount) + + // generate the test blocks, cgs and collections + for i := 0; i < newBlocksCount; i++ { + block := s.generateBlock(clusterCommittee, snap) + block.Header.Height = startHeight + uint64(i) + s.blockMap[block.Header.Height] = &block + blocks[i] = block + s.mockCollectionsForBlock(block) + s.finalizedBlock = block.Header + } + + // latest of all the new blocks which are newer than the last block processed + latestBlock := blocks[2] + + // block several blocks newer than the last block processed + hotstuffBlock := hotmodel.Block{ + BlockID: latestBlock.ID(), + } + + missingCollectionCountPerBlock := 4 + wg := sync.WaitGroup{} + wg.Add(missingCollectionCountPerBlock * newBlocksCount) + + // expected all new blocks after last block processed + for _, block := range blocks { + s.blocks.On("IndexBlockForCollections", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + + for _, cg := range block.Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { + // Ensure the test does not complete its work faster than necessary, so we can check all expected results + wg.Done() + 
}).Once() + } + for _, seal := range block.Payload.Seals { + s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() + } + } + + eng.OnFinalizedBlock(&hotstuffBlock) + + unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process all blocks before timeout") + + expectedEntityByIDCalls := 0 + expectedIndexCalls := 0 + for _, block := range blocks { + expectedEntityByIDCalls += len(block.Payload.Guarantees) + expectedIndexCalls += len(block.Payload.Seals) + } + + s.headers.AssertExpectations(s.T()) + s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockForCollections", newBlocksCount) + s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls) + s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) +} + +// TestOnCollection checks that when a Collection is received, it is persisted +func (s *Suite) TestOnCollection() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + s.initEngineAndSyncer(irrecoverableCtx) + + collection := unittest.CollectionFixture(5) + light := collection.Light() + + // we should store the light collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, nil).Once() + + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + require.NoError(s.T(), lctx.AcquireLock(storerr.LockInsertCollection)) + defer lctx.Release() + + err := indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + require.NoError(s.T(), err) + + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) +} + +// TestExecutionReceiptsAreIndexed checks that execution receipts are properly indexed +func (s *Suite) TestExecutionReceiptsAreIndexed() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + collection := unittest.CollectionFixture(5) + light := collection.Light() + + // we should store the light collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", &collection).Return(light, nil).Once() + block := &flow.Block{ + Header: &flow.Header{Height: 0}, + Payload: &flow.Payload{Guarantees: []*flow.CollectionGuarantee{}}, + } + s.blocks.On("ByID", mock.Anything).Return(block, nil) + + // for each transaction in the collection, we should store it + needed := make(map[flow.Identifier]struct{}) + for _, txID := range light.Transactions { + needed[txID] = struct{}{} + } + s.transactions.On("Store", mock.Anything).Return(nil).Run( + func(args mock.Arguments) { + tx := args.Get(0).(*flow.TransactionBody) + _, pending := needed[tx.ID()] + s.Assert().True(pending, "tx not pending (%x)", tx.ID()) + }, + ) + er1 := unittest.ExecutionReceiptFixture() + er2 := unittest.ExecutionReceiptFixture() + + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", er1.ExecutionResult.BlockID).Return(nil, storerr.ErrNotFound) + + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", er2.ExecutionResult.BlockID).Return(nil, storerr.ErrNotFound) + + err := eng.persistExecutionReceipt(er1) + require.NoError(s.T(), err) + + err = eng.persistExecutionReceipt(er2) + require.NoError(s.T(), err) + + s.receipts.AssertExpectations(s.T()) + s.results.AssertExpectations(s.T()) + s.receipts.AssertExpectations(s.T()) +} + +// TestOnCollectionDuplicate checks that when a duplicate collection is received, the node doesn't +// crash but just ignores its 
transactions. +func (s *Suite) TestOnCollectionDuplicate() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + s.initEngineAndSyncer(irrecoverableCtx) + + collection := unittest.CollectionFixture(5) + light := collection.Light() + + // we should store the light collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, storerr.ErrAlreadyExists).Once() + + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + err := lctx.AcquireLock(storerr.LockInsertCollection) + require.NoError(s.T(), err) + defer lctx.Release() + + err = indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + require.Error(s.T(), err) + require.ErrorIs(s.T(), err, storerr.ErrAlreadyExists) + + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) +} + +// TestRequestMissingCollections tests that the all missing collections are requested on the call to requestMissingCollections +func (s *Suite) TestRequestMissingCollections() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) + + blkCnt := 3 + startHeight := uint64(1000) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + + // generate the test blocks and collections + var collIDs []flow.Identifier + refBlockID := unittest.IdentifierFixture() + for i := 0; i < blkCnt; i++ { + block := unittest.BlockFixture() + block.SetPayload(unittest.PayloadFixture( + unittest.WithGuarantees( + unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(refBlockID))...), + )) + // some blocks may not be present hence add a gap + height := startHeight + uint64(i) + block.Header.Height = height + s.blockMap[block.Header.Height] = &block + s.finalizedBlock = block.Header + + for _, c := range block.Payload.Guarantees { + collIDs = append(collIDs, c.CollectionID) + c.ReferenceBlockID = refBlockID + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + c.SignerIndices = indices + } + } + + // consider collections are missing for all blocks + err := s.lastFullBlockHeight.Set(startHeight - 1) + s.Require().NoError(err) + + // consider the last test block as the head + + // p is the probability of not receiving the collection before the next poll and it + // helps simulate the slow trickle of the requested collections being received + var p float32 + + // rcvdColl is the map simulating the collection storage key-values + rcvdColl := make(map[flow.Identifier]struct{}) + + // for the first lookup call for each collection, it will be reported as missing from db + // for the subsequent calls, it will be reported as present with the probability p + s.collections.On("LightByID", mock.Anything).Return( + func(cID flow.Identifier) *flow.LightCollection { + return nil // the actual collection object return is never really read + }, + func(cID flow.Identifier) error { + if _, ok := rcvdColl[cID]; ok { + return nil + } + if rand.Float32() >= p { + rcvdColl[cID] = struct{}{} + } + return storerr.ErrNotFound + }). 
+ // simulate some db i/o contention + After(time.Millisecond * time.Duration(rand.Intn(5))) + + // setup the requester engine mock + // entityByID should be called once per collection + for _, c := range collIDs { + s.request.On("EntityByID", c, mock.Anything).Return() + } + // force should be called once + s.request.On("Force").Return() + + cluster := protocol.NewCluster(s.T()) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := protocol.NewCommittedEpoch(s.T()) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := protocol.NewEpochQuery(s.T()) + epochs.On("Current").Return(epoch, nil) + snap := protocol.NewSnapshot(s.T()) + snap.On("Epochs").Return(epochs) + s.proto.state.On("AtBlockID", refBlockID).Return(snap) + + assertExpectations := func() { + s.request.AssertExpectations(s.T()) + s.collections.AssertExpectations(s.T()) + s.proto.snapshot.AssertExpectations(s.T()) + s.blocks.AssertExpectations(s.T()) + } + + // test 1 - collections are not received before timeout + s.Run("timeout before all missing collections are received", func() { + + // simulate that collection are never received + p = 1 + + // timeout after 3 db polls + ctx, cancel := context.WithTimeout(context.Background(), 100*collectionCatchupDBPollInterval) + defer cancel() + + err := syncer.requestMissingCollectionsBlocking(ctx) + + require.Error(s.T(), err) + require.Contains(s.T(), err.Error(), "context deadline exceeded") + + assertExpectations() + }) + // test 2 - all collections are eventually received before the deadline + s.Run("all missing collections are received", func() { + + // 90% of the time, collections are reported as not received when the collection storage is queried + p = 0.9 + + ctx, cancel := context.WithTimeout(context.Background(), collectionCatchupTimeout) + defer cancel() + + err := syncer.requestMissingCollectionsBlocking(ctx) + + require.NoError(s.T(), err) + require.Len(s.T(), rcvdColl, len(collIDs)) + + assertExpectations() + }) +} + +// TestProcessBackgroundCalls tests that updateLastFullBlockHeight and checkMissingCollections +// function calls keep the FullBlockIndex up-to-date and request collections if blocks with missing +// collections exceed the threshold. 
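+//
+// The sub-tests cover, in order: requesting missing collections once the number of
+// incomplete blocks exceeds defaultMissingCollsForBlockThreshold, requesting them once
+// incomplete blocks trail the finalized height by more than defaultMissingCollsForAgeThreshold,
+// making no request while both thresholds are unmet, advancing the last full block height
+// when newer complete blocks are found, and never advancing it beyond the finalized height.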
+func (s *Suite) TestProcessBackgroundCalls() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) + + blkCnt := 3 + collPerBlk := 10 + startHeight := uint64(1000) + blocks := make([]flow.Block, blkCnt) + collMap := make(map[flow.Identifier]*flow.LightCollection, blkCnt*collPerBlk) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + + refBlockID := unittest.IdentifierFixture() + // generate the test blocks, cgs and collections + for i := 0; i < blkCnt; i++ { + guarantees := make([]*flow.CollectionGuarantee, collPerBlk) + for j := 0; j < collPerBlk; j++ { + coll := unittest.CollectionFixture(2).Light() + collMap[coll.ID()] = &coll + cg := unittest.CollectionGuaranteeFixture(func(cg *flow.CollectionGuarantee) { + cg.CollectionID = coll.ID() + cg.ReferenceBlockID = refBlockID + }) + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + cg.SignerIndices = indices + guarantees[j] = cg + } + block := unittest.BlockFixture() + block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantees...))) + // set the height + height := startHeight + uint64(i) + block.Header.Height = height + s.blockMap[block.Header.Height] = &block + blocks[i] = block + s.finalizedBlock = block.Header + } + + finalizedHeight := s.finalizedBlock.Height + + cluster := protocol.NewCluster(s.T()) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := protocol.NewCommittedEpoch(s.T()) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := protocol.NewEpochQuery(s.T()) + epochs.On("Current").Return(epoch, nil) + snap := protocol.NewSnapshot(s.T()) + snap.On("Epochs").Return(epochs) + s.proto.state.On("AtBlockID", refBlockID).Return(snap) + + // blkMissingColl controls which collections are reported as missing by the collections storage mock + blkMissingColl := make([]bool, blkCnt) + for i := 0; i < blkCnt; i++ { + blkMissingColl[i] = false + for _, cg := range blocks[i].Payload.Guarantees { + j := i + s.collections.On("LightByID", cg.CollectionID).Return( + func(cID flow.Identifier) *flow.LightCollection { + return collMap[cID] + }, + func(cID flow.Identifier) error { + if blkMissingColl[j] { + return storerr.ErrNotFound + } + return nil + }) + } + } + + rootBlk := blocks[0] + + // root block is the last complete block + err := s.lastFullBlockHeight.Set(rootBlk.Header.Height) + s.Require().NoError(err) + + s.Run("missing collections are requested when count exceeds defaultMissingCollsForBlockThreshold", func() { + // lower the block threshold to request missing collections + defaultMissingCollsForBlockThreshold = 2 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + // setup receive engine expectations + for _, cg := range blocks[i].Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() + } + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are requested + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) 
// no new call to UpdateLastFullBlockHeight should be made + }) + + s.Run("missing collections are requested when count exceeds defaultMissingCollsForAgeThreshold", func() { + // lower the height threshold to request missing collections + defaultMissingCollsForAgeThreshold = 1 + + // raise the block threshold to ensure it does not trigger missing collection request + defaultMissingCollsForBlockThreshold = blkCnt + 1 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + // setup receive engine expectations + for _, cg := range blocks[i].Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() + } + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are requested + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) // not new call to UpdateLastFullBlockHeight should be made + }) + + s.Run("missing collections are not requested if defaultMissingCollsForBlockThreshold not reached", func() { + // raise the thresholds to avoid requesting missing collections + defaultMissingCollsForAgeThreshold = 3 + defaultMissingCollsForBlockThreshold = 3 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are not requested even though there are collections missing + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) // not new call to UpdateLastFullBlockHeight should be made + }) + + // create new block + finalizedBlk := unittest.BlockFixture() + height := blocks[blkCnt-1].Header.Height + 1 + finalizedBlk.Header.Height = height + s.blockMap[height] = &finalizedBlk + + finalizedHeight = finalizedBlk.Header.Height + s.finalizedBlock = finalizedBlk.Header + + blockBeforeFinalized := blocks[blkCnt-1].Header + + s.Run("full block height index is advanced if newer full blocks are discovered", func() { + // set lastFullBlockHeight to block + err = s.lastFullBlockHeight.Set(blockBeforeFinalized.Height) + s.Require().NoError(err) + + err = syncer.updateLastFullBlockHeight() + s.Require().NoError(err) + s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) + s.Require().NoError(err) + + s.blocks.AssertExpectations(s.T()) + }) + + s.Run("full block height index is not advanced beyond finalized blocks", func() { + err = syncer.updateLastFullBlockHeight() + s.Require().NoError(err) + + s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) + s.blocks.AssertExpectations(s.T()) + }) +} diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go new file mode 100644 index 00000000000..63f1ebb9592 --- /dev/null +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -0,0 +1,165 @@ +package ingestion2 + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/jobqueue" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +const ( + // 
finalizedBlockProcessorWorkerCount defines the number of workers that + // concurrently process finalized blocks in the job queue. + // MUST be 1 to ensure sequential processing + finalizedBlockProcessorWorkerCount = 1 + + // searchAhead is a number of blocks that should be processed ahead by jobqueue + // MUST be 1 to ensure sequential processing + searchAhead = 1 +) + +// FinalizedBlockProcessor handles processing of finalized blocks, +// including indexing and syncing of related collections and execution results. +// +// FinalizedBlockProcessor is designed to handle the ingestion of finalized Flow blocks +// in a scalable and decoupled manner. It uses a jobqueue.ComponentConsumer to consume +// and process finalized block jobs asynchronously. This design enables the processor +// to handle high-throughput block finalization events without blocking other parts +// of the system. +// +// The processor relies on a notifier (engine.Notifier) to signal when a new finalized +// block is available, which triggers the job consumer to process it. The actual +// processing involves indexing block-to-collection and block-to-execution-result +// mappings, as well as requesting the associated collections. +type FinalizedBlockProcessor struct { + log zerolog.Logger + + consumer *jobqueue.ComponentConsumer + consumerNotifier engine.Notifier + blocks storage.Blocks + + executionResults storage.ExecutionResults + + collectionSyncer *CollectionSyncer + collectionExecutedMetric module.CollectionExecutedMetric +} + +// NewFinalizedBlockProcessor creates and initializes a new FinalizedBlockProcessor, +// setting up job consumer infrastructure to handle finalized block processing. +// +// No errors are expected during normal operations. +func NewFinalizedBlockProcessor( + log zerolog.Logger, + state protocol.State, + blocks storage.Blocks, + executionResults storage.ExecutionResults, + finalizedProcessedHeight storage.ConsumerProgressInitializer, + syncer *CollectionSyncer, + collectionExecutedMetric module.CollectionExecutedMetric, +) (*FinalizedBlockProcessor, error) { + reader := jobqueue.NewFinalizedBlockReader(state, blocks) + finalizedBlock, err := state.Final().Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block header: %w", err) + } + + consumerNotifier := engine.NewNotifier() + processor := &FinalizedBlockProcessor{ + log: log, + blocks: blocks, + executionResults: executionResults, + consumerNotifier: consumerNotifier, + collectionSyncer: syncer, + collectionExecutedMetric: collectionExecutedMetric, + } + + processor.consumer, err = jobqueue.NewComponentConsumer( + log.With().Str("module", "ingestion_block_consumer").Logger(), + consumerNotifier.Channel(), + finalizedProcessedHeight, + reader, + finalizedBlock.Height, + processor.processFinalizedBlockJobCallback, + finalizedBlockProcessorWorkerCount, + searchAhead, + ) + if err != nil { + return nil, fmt.Errorf("error creating finalized block jobqueue: %w", err) + } + + return processor, nil +} + +// Notify notifies the processor that a new finalized block is available for processing. +func (p *FinalizedBlockProcessor) Notify() { + p.consumerNotifier.Notify() +} + +// StartWorkerLoop begins processing of finalized blocks and signals readiness when initialization is complete. 
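+//
+// StartWorkerLoop matches the component.ComponentWorker signature, so it is intended to
+// be registered as a worker routine. An illustrative registration (a sketch assuming
+// builder is a component.ComponentManagerBuilder and processor is the *FinalizedBlockProcessor):
+//
+//	builder.AddWorker(processor.StartWorkerLoop)
+//
+// It starts the underlying job consumer, signals ready once the consumer is ready (unless
+// the context is cancelled first), and returns only after the consumer has shut down.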
+func (p *FinalizedBlockProcessor) StartWorkerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + p.consumer.Start(ctx) + + err := util.WaitClosed(ctx, p.consumer.Ready()) + if err == nil { + ready() + } + + <-p.consumer.Done() +} + +// processFinalizedBlockJobCallback is a jobqueue callback that processes a finalized block job. +func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( + ctx irrecoverable.SignalerContext, + job module.Job, + done func(), +) { + block, err := jobqueue.JobToBlock(job) + if err != nil { + ctx.Throw(fmt.Errorf("failed to convert job to block: %w", err)) + return + } + + err = p.indexFinalizedBlock(block) + if err != nil { + p.log.Error().Err(err). + Str("job_id", string(job.ID())). + Msg("unexpected error during finalized block processing job") + ctx.Throw(fmt.Errorf("failed to index finalized block: %w", err)) + return + } + + done() +} + +// indexFinalizedBlock indexes the given finalized block’s collection guarantees and execution results, +// and requests related collections from the syncer. +// +// No errors are expected during normal operations. +func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { + err := p.blocks.IndexBlockForCollections(block.Header.ID(), flow.GetIDs(block.Payload.Guarantees)) + if err != nil { + return fmt.Errorf("could not index block for collections: %w", err) + } + + // loop through seals and index ID -> result ID + for _, seal := range block.Payload.Seals { + err := p.executionResults.Index(seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index block for execution result: %w", err) + } + } + + p.collectionSyncer.RequestCollectionsForBlock(block.Header.Height, block.Payload.Guarantees) + p.collectionExecutedMetric.BlockFinalized(block) + + return nil +} diff --git a/engine/access/integration_unsecure_grpc_server_test.go b/engine/access/integration_unsecure_grpc_server_test.go index 71496409810..553549eccde 100644 --- a/engine/access/integration_unsecure_grpc_server_test.go +++ b/engine/access/integration_unsecure_grpc_server_test.go @@ -24,6 +24,8 @@ import ( "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" "github.com/onflow/flow-go/engine/access/state_stream" statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/engine/access/subscription" @@ -197,7 +199,10 @@ func (suite *SameGRPCPortTestSuite) SetupTest() { MaxHeightRange: 0, Log: suite.log, SnapshotHistoryLimit: 0, - Communicator: backend.NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, }) require.NoError(suite.T(), err) diff --git a/engine/access/rest/http/routes/blocks.go b/engine/access/rest/http/routes/blocks.go index 875d5367181..79b2f580719 100644 --- a/engine/access/rest/http/routes/blocks.go +++ b/engine/access/rest/http/routes/blocks.go @@ -195,13 +195,13 @@ func NewBlockProvider(backend access.API, options ...blockProviderOption) *block func (blkProvider *blockProvider) getBlock(ctx context.Context) (*flow.Block, flow.BlockStatus, error) { if 
blkProvider.id != nil { - blk, _, err := blkProvider.backend.GetBlockByID(ctx, *blkProvider.id) + blk, status, err := blkProvider.backend.GetBlockByID(ctx, *blkProvider.id) if err != nil { // unfortunately backend returns internal error status if not found return nil, flow.BlockStatusUnknown, common.NewNotFoundError( fmt.Sprintf("error looking up block with ID %s", blkProvider.id.String()), err, ) } - return blk, flow.BlockStatusUnknown, nil + return blk, status, nil } if blkProvider.latest { diff --git a/engine/access/rest/http/routes/blocks_test.go b/engine/access/rest/http/routes/blocks_test.go index 44ff77584e6..0b6c44da77c 100644 --- a/engine/access/rest/http/routes/blocks_test.go +++ b/engine/access/rest/http/routes/blocks_test.go @@ -36,17 +36,15 @@ func prepareTestVectors(t *testing.T, executionResults []*flow.ExecutionResult, blkCnt int) []testVector { - singleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusUnknown) - singleSealedBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusSealed) - multipleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusUnknown) - multipleSealedBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusSealed) + singleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusSealed) + multipleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusSealed) - singleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], false, flow.BlockStatusUnknown) - multipleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks, executionResults, false, flow.BlockStatusUnknown) + singleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], false, flow.BlockStatusSealed) + multipleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks, executionResults, false, flow.BlockStatusSealed) - multipleBlockHeaderWithHeaderSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusUnknown, []string{"header"}) - multipleBlockHeaderWithHeaderAndStatusSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusUnknown, []string{"header", "block_status"}) - multipleBlockHeaderWithUnknownSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusUnknown, []string{"unknown"}) + multipleBlockHeaderWithHeaderSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"header"}) + multipleBlockHeaderWithHeaderAndStatusSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"header", "block_status"}) + multipleBlockHeaderWithUnknownSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"unknown"}) invalidID := unittest.IdentifierFixture().String() invalidHeight := fmt.Sprintf("%d", blkCnt+1) @@ -82,19 +80,19 @@ func prepareTestVectors(t *testing.T, description: "Get single expanded block by height", request: getByHeightsExpandedURL(t, heights[:1]...), expectedStatus: http.StatusOK, - expectedResponse: singleSealedBlockExpandedResponse, + expectedResponse: singleBlockExpandedResponse, }, { description: "Get multiple expanded 
blocks by heights", request: getByHeightsExpandedURL(t, heights...), expectedStatus: http.StatusOK, - expectedResponse: multipleSealedBlockExpandedResponse, + expectedResponse: multipleBlockExpandedResponse, }, { description: "Get multiple expanded blocks by start and end height", request: getByStartEndHeightExpandedURL(t, heights[0], heights[len(heights)-1]), expectedStatus: http.StatusOK, - expectedResponse: multipleSealedBlockExpandedResponse, + expectedResponse: multipleBlockExpandedResponse, }, { description: "Get block by ID not found", diff --git a/engine/access/rest_api_test.go b/engine/access/rest_api_test.go index 58dac34a8e5..c7a72165a16 100644 --- a/engine/access/rest_api_test.go +++ b/engine/access/rest_api_test.go @@ -27,6 +27,8 @@ import ( "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/grpcserver" @@ -176,7 +178,10 @@ func (suite *RestAPITestSuite) SetupTest() { MaxHeightRange: 0, Log: suite.log, SnapshotHistoryLimit: 0, - Communicator: backend.NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, }) require.NoError(suite.T(), err) @@ -241,6 +246,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { block.Header.Height = uint64(i) suite.blocks.On("ByID", block.ID()).Return(block, nil) suite.blocks.On("ByHeight", block.Header.Height).Return(block, nil) + suite.headers.On("BlockIDByHeight", block.Header.Height).Return(block.ID(), nil) testBlocks[i] = block testBlockIDs[i] = block.ID().String() diff --git a/engine/access/rpc/backend/accounts/accounts.go b/engine/access/rpc/backend/accounts/accounts.go new file mode 100644 index 00000000000..76eb072fe10 --- /dev/null +++ b/engine/access/rpc/backend/accounts/accounts.go @@ -0,0 +1,241 @@ +package accounts + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rpc/backend/accounts/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/connection" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type Accounts struct { + log zerolog.Logger + state protocol.State + headers storage.Headers + provider provider.AccountProvider +} + +var _ access.AccountsAPI = (*Accounts)(nil) + +func NewAccountsBackend( + log zerolog.Logger, + state protocol.State, + headers storage.Headers, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + scriptExecMode query_mode.IndexQueryMode, + scriptExecutor execution.ScriptExecutor, + execNodeIdentitiesProvider 
*commonrpc.ExecutionNodeIdentitiesProvider, +) (*Accounts, error) { + var accountProvider provider.AccountProvider + + switch scriptExecMode { + case query_mode.IndexQueryModeLocalOnly: + accountProvider = provider.NewLocalAccountProvider(log, state, scriptExecutor) + + case query_mode.IndexQueryModeExecutionNodesOnly: + accountProvider = provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + + case query_mode.IndexQueryModeFailover: + local := provider.NewLocalAccountProvider(log, state, scriptExecutor) + execNode := provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + accountProvider = provider.NewFailoverAccountProvider(log, state, local, execNode) + + case query_mode.IndexQueryModeCompare: + local := provider.NewLocalAccountProvider(log, state, scriptExecutor) + execNode := provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + accountProvider = provider.NewComparingAccountProvider(log, state, local, execNode) + + default: + return nil, fmt.Errorf("unknown execution mode: %v", scriptExecMode) + } + + return &Accounts{ + log: log, + state: state, + headers: headers, + provider: accountProvider, + }, nil +} + +// GetAccount returns the account details at the latest sealed block. +// Alias for GetAccountAtLatestBlock +func (a *Accounts) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { + return a.GetAccountAtLatestBlock(ctx, address) +} + +// GetAccountAtLatestBlock returns the account details at the latest sealed block. +func (a *Accounts) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + account, err := a.provider.GetAccountAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account at blockID: %v", sealedBlockID) + return nil, err + } + + return account, nil +} + +// GetAccountAtBlockHeight returns the account details at the given block height. +func (a *Accounts) GetAccountAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) (*flow.Account, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + account, err := a.provider.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account at height: %d", height) + return nil, err + } + + return account, nil +} + +// GetAccountBalanceAtLatestBlock returns the account balance at the latest sealed block. 
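+//
+// If the latest sealed header cannot be resolved, the error is treated as an irrecoverable
+// exception and thrown to the signaler context embedded in ctx; errors returned by the
+// configured account provider are passed back to the caller unchanged.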
+func (a *Accounts) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return 0, err + } + + sealedBlockID := sealed.ID() + balance, err := a.provider.GetAccountBalanceAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", sealedBlockID) + return 0, err + } + + return balance, nil +} + +// GetAccountBalanceAtBlockHeight returns the account balance at the given block height. +func (a *Accounts) GetAccountBalanceAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) (uint64, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return 0, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + balance, err := a.provider.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account balance at height: %v", height) + return 0, err + } + + return balance, nil +} + +// GetAccountKeyAtLatestBlock returns the account public key at the latest sealed block. +func (a *Accounts) GetAccountKeyAtLatestBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, +) (*flow.AccountPublicKey, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + key, err := a.provider.GetAccountKeyAtBlock(ctx, address, keyIndex, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account key at blockID: %v", sealedBlockID) + return nil, err + } + + return key, nil +} + +// GetAccountKeyAtBlockHeight returns the account public key by key index at the given block height. +func (a *Accounts) GetAccountKeyAtBlockHeight( + ctx context.Context, + address flow.Address, + keyIndex uint32, + height uint64, +) (*flow.AccountPublicKey, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + key, err := a.provider.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account key at height: %v", height) + return nil, err + } + + return key, nil +} + +// GetAccountKeysAtLatestBlock returns the account public keys at the latest sealed block. +func (a *Accounts) GetAccountKeysAtLatestBlock( + ctx context.Context, + address flow.Address, +) ([]flow.AccountPublicKey, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + keys, err := a.provider.GetAccountKeysAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account keys at blockID: %v", sealedBlockID) + return nil, err + } + + return keys, nil +} + +// GetAccountKeysAtBlockHeight returns the account public keys at the given block height. 
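+//
+// The height is first resolved to a block ID via the headers storage; lookup failures are
+// routed through common.ResolveHeightError and commonrpc.ConvertStorageError so the caller
+// receives a properly converted error rather than a raw storage error.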
+func (a *Accounts) GetAccountKeysAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) ([]flow.AccountPublicKey, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + keys, err := a.provider.GetAccountKeysAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account keys at height: %v", height) + return nil, err + } + + return keys, nil +} diff --git a/engine/access/rpc/backend/backend_accounts_test.go b/engine/access/rpc/backend/accounts/accounts_test.go similarity index 80% rename from engine/access/rpc/backend/backend_accounts_test.go rename to engine/access/rpc/backend/accounts/accounts_test.go index 129b5679e33..5af3fede1eb 100644 --- a/engine/access/rpc/backend/backend_accounts_test.go +++ b/engine/access/rpc/backend/accounts/accounts_test.go @@ -1,4 +1,4 @@ -package backend +package accounts import ( "context" @@ -7,11 +7,14 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ -27,7 +30,7 @@ import ( execproto "github.com/onflow/flow/protobuf/go/flow/execution" ) -type BackendAccountsSuite struct { +type AccountsSuite struct { suite.Suite log zerolog.Logger @@ -50,10 +53,10 @@ type BackendAccountsSuite struct { } func TestBackendAccountsSuite(t *testing.T) { - suite.Run(t, new(BackendAccountsSuite)) + suite.Run(t, new(AccountsSuite)) } -func (s *BackendAccountsSuite) SetupTest() { +func (s *AccountsSuite) SetupTest() { s.log = unittest.Logger() s.state = protocol.NewState(s.T()) s.snapshot = protocol.NewSnapshot(s.T()) @@ -77,76 +80,15 @@ func (s *BackendAccountsSuite) SetupTest() { s.failingAddress = unittest.AddressFixture() } -func (s *BackendAccountsSuite) defaultBackend() *backendAccounts { - return &backendAccounts{ - log: s.log, - state: s.state, - headers: s.headers, - connFactory: s.connectionFactory, - nodeCommunicator: NewNodeCommunicator(false), - execNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( - s.log, - s.state, - s.receipts, - flow.IdentifierList{}, - flow.IdentifierList{}, - ), - } -} - -// setupExecutionNodes sets up the mocks required to test against an EN backend -func (s *BackendAccountsSuite) setupExecutionNodes(block *flow.Block) { - s.params.On("FinalizedRoot").Return(s.rootHeader, nil) - s.state.On("Params").Return(s.params) - s.state.On("Final").Return(s.snapshot) - s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) - - // this line causes a S1021 lint error because receipts is explicitly declared. this is required - // to ensure the mock library handles the response type correctly - var receipts flow.ExecutionReceiptList //nolint:gosimple - receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) - s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) - - s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). 
- Return(s.execClient, &mocks.MockCloser{}, nil) -} - -// setupENSuccessResponse configures the execution node client to return a successful response -func (s *BackendAccountsSuite) setupENSuccessResponse(blockID flow.Identifier) { - expectedExecRequest := &execproto.GetAccountAtBlockIDRequest{ - BlockId: blockID[:], - Address: s.account.Address.Bytes(), - } - - convertedAccount, err := convert.AccountToMessage(s.account) - s.Require().NoError(err) - - s.execClient.On("GetAccountAtBlockID", mock.Anything, expectedExecRequest). - Return(&execproto.GetAccountAtBlockIDResponse{ - Account: convertedAccount, - }, nil) -} - -// setupENFailingResponse configures the execution node client to return an error -func (s *BackendAccountsSuite) setupENFailingResponse(blockID flow.Identifier, err error) { - failingRequest := &execproto.GetAccountAtBlockIDRequest{ - BlockId: blockID[:], - Address: s.failingAddress.Bytes(), - } - - s.execClient.On("GetAccountAtBlockID", mock.Anything, failingRequest). - Return(nil, err) -} - // TestGetAccountFromExecutionNode_HappyPath tests successfully getting accounts from execution nodes -func (s *BackendAccountsSuite) TestGetAccountFromExecutionNode_HappyPath() { +func (s *AccountsSuite) TestGetAccountFromExecutionNode_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor) s.Run("GetAccount - happy path", func() { s.testGetAccount(ctx, backend, codes.OK) @@ -162,7 +104,7 @@ func (s *BackendAccountsSuite) TestGetAccountFromExecutionNode_HappyPath() { } // TestGetAccountFromExecutionNode_Fails errors received from execution nodes are returned -func (s *BackendAccountsSuite) TestGetAccountFromExecutionNode_Fails() { +func (s *AccountsSuite) TestGetAccountFromExecutionNode_Fails() { ctx := context.Background() // use a status code that's not used in the API to make sure it's passed through @@ -172,8 +114,8 @@ func (s *BackendAccountsSuite) TestGetAccountFromExecutionNode_Fails() { s.setupExecutionNodes(s.block) s.setupENFailingResponse(s.block.ID(), errToReturn) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor) s.Run("GetAccount - fails with backend err", func() { s.testGetAccount(ctx, backend, statusCode) @@ -189,16 +131,14 @@ func (s *BackendAccountsSuite) TestGetAccountFromExecutionNode_Fails() { } // TestGetAccountFromStorage_HappyPath test successfully getting accounts from local storage -func (s *BackendAccountsSuite) TestGetAccountFromStorage_HappyPath() { +func (s *AccountsSuite) TestGetAccountFromStorage_HappyPath() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.account.Address, s.block.Header.Height). 
Return(s.account, nil) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) s.Run("GetAccount - happy path", func() { s.testGetAccount(ctx, backend, codes.OK) @@ -215,14 +155,11 @@ func (s *BackendAccountsSuite) TestGetAccountFromStorage_HappyPath() { // TestGetAccountFromStorage_Fails tests that errors received from local storage are handled // and converted to the appropriate status code -func (s *BackendAccountsSuite) TestGetAccountFromStorage_Fails() { +func (s *AccountsSuite) TestGetAccountFromStorage_Fails() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) testCases := []struct { err error @@ -267,17 +204,14 @@ func (s *BackendAccountsSuite) TestGetAccountFromStorage_Fails() { // TestGetAccountFromFailover_HappyPath tests that when an error is returned getting an account // from local storage, the backend will attempt to get the account from an execution node -func (s *BackendAccountsSuite) TestGetAccountFromFailover_HappyPath() { +func (s *AccountsSuite) TestGetAccountFromFailover_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} { scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.account.Address, s.block.Header.Height). @@ -302,7 +236,7 @@ func (s *BackendAccountsSuite) TestGetAccountFromFailover_HappyPath() { // TestGetAccountFromFailover_ReturnsENErrors tests that when an error is returned from the execution // node during a failover, it is returned to the caller. -func (s *BackendAccountsSuite) TestGetAccountFromFailover_ReturnsENErrors() { +func (s *AccountsSuite) TestGetAccountFromFailover_ReturnsENErrors() { ctx := context.Background() // use a status code that's not used in the API to make sure it's passed through @@ -316,9 +250,7 @@ func (s *BackendAccountsSuite) TestGetAccountFromFailover_ReturnsENErrors() { scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.failingAddress, s.block.Header.Height). 
Return(nil, storage.ErrHeightNotIndexed) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) s.Run("GetAccount - fails with backend err", func() { s.testGetAccount(ctx, backend, statusCode) @@ -335,12 +267,9 @@ func (s *BackendAccountsSuite) TestGetAccountFromFailover_ReturnsENErrors() { // TestGetAccountAtLatestBlock_InconsistentState tests that signaler context received error when node state is // inconsistent -func (s *BackendAccountsSuite) TestGetAccountAtLatestBlockFromStorage_InconsistentState() { +func (s *AccountsSuite) TestGetAccountAtLatestBlockFromStorage_InconsistentState() { scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) s.Run(fmt.Sprintf("GetAccountAtLatestBlock - fails with %v", "inconsistent node's state"), func() { s.state.On("Sealed").Return(s.snapshot, nil) @@ -358,17 +287,15 @@ func (s *BackendAccountsSuite) TestGetAccountAtLatestBlockFromStorage_Inconsiste } // TestGetAccountBalanceFromStorage_HappyPaths tests successfully getting accounts balance from storage -func (s *BackendAccountsSuite) TestGetAccountBalanceFromStorage_HappyPath() { +func (s *AccountsSuite) TestGetAccountBalanceFromStorage_HappyPath() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor - scriptExecutor.On("GetAccountBalance", mock.Anything, s.account.Address, s.block.Header.Height). 
Return(s.account.Balance, nil) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) + s.Run("GetAccountBalanceAtLatestBlock - happy path", func() { s.testGetAccountBalanceAtLatestBlock(ctx, backend) }) @@ -380,14 +307,14 @@ func (s *BackendAccountsSuite) TestGetAccountBalanceFromStorage_HappyPath() { } // TestGetAccountBalanceFromExecutionNode_HappyPath tests successfully getting accounts balance from execution nodes -func (s *BackendAccountsSuite) TestGetAccountBalanceFromExecutionNode_HappyPath() { +func (s *AccountsSuite) TestGetAccountBalanceFromExecutionNode_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor) s.Run("GetAccountBalanceAtLatestBlock - happy path", func() { s.testGetAccountBalanceAtLatestBlock(ctx, backend) @@ -401,17 +328,14 @@ func (s *BackendAccountsSuite) TestGetAccountBalanceFromExecutionNode_HappyPath( // TestGetAccountBalanceFromFailover_HappyPath tests that when an error is returned getting accounts balance // from local storage, the backend will attempt to get the account balances from an execution node -func (s *BackendAccountsSuite) TestGetAccountBalanceFromFailover_HappyPath() { +func (s *AccountsSuite) TestGetAccountBalanceFromFailover_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} { scriptExecutor.On("GetAccountBalance", mock.Anything, s.account.Address, s.block.Header.Height). @@ -431,16 +355,14 @@ func (s *BackendAccountsSuite) TestGetAccountBalanceFromFailover_HappyPath() { // TestGetAccountKeysScriptExecutionEnabled_HappyPath tests successfully getting accounts keys when // script execution is enabled. -func (s *BackendAccountsSuite) TestGetAccountKeysFromStorage_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeysFromStorage_HappyPath() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) scriptExecutor.On("GetAccountKeys", mock.Anything, s.account.Address, s.block.Header.Height). Return(s.account.Keys, nil) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) s.Run("GetAccountKeysAtLatestBlock - happy path", func() { s.testGetAccountKeysAtLatestBlock(ctx, backend) @@ -456,13 +378,10 @@ func (s *BackendAccountsSuite) TestGetAccountKeysFromStorage_HappyPath() { // TestGetAccountKeyScriptExecutionEnabled_HappyPath tests successfully getting account key by key index when // script execution is enabled. 
-func (s *BackendAccountsSuite) TestGetAccountKeyFromStorage_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeyFromStorage_HappyPath() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor) var keyIndex uint32 = 0 keyByIndex := findAccountKeyByIndex(s.account.Keys, keyIndex) @@ -481,14 +400,14 @@ func (s *BackendAccountsSuite) TestGetAccountKeyFromStorage_HappyPath() { } // TestGetAccountKeysFromExecutionNode_HappyPath tests successfully getting accounts keys from execution nodes -func (s *BackendAccountsSuite) TestGetAccountKeysFromExecutionNode_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeysFromExecutionNode_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor) s.Run("GetAccountKeysAtLatestBlock - all keys - happy path", func() { s.testGetAccountKeysAtLatestBlock(ctx, backend) @@ -503,14 +422,14 @@ func (s *BackendAccountsSuite) TestGetAccountKeysFromExecutionNode_HappyPath() { } // TestGetAccountKeyFromExecutionNode_HappyPath tests successfully getting accounts key by key index from execution nodes -func (s *BackendAccountsSuite) TestGetAccountKeyFromExecutionNode_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeyFromExecutionNode_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor) var keyIndex uint32 = 0 s.Run("GetAccountKeysAtLatestBlock - by key index - happy path", func() { @@ -527,17 +446,14 @@ func (s *BackendAccountsSuite) TestGetAccountKeyFromExecutionNode_HappyPath() { // TestGetAccountBalanceFromFailover_HappyPath tests that when an error is returned getting accounts keys // from local storage, the backend will attempt to get the account key from an execution node -func (s *BackendAccountsSuite) TestGetAccountKeysFromFailover_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeysFromFailover_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} { scriptExecutor.On("GetAccountKeys", mock.Anything, s.account.Address, s.block.Header.Height). 
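Each failover case above follows the same shape; a minimal sketch, assuming the suite's SetupTest mocks and that setupExecutionNodes/setupENSuccessResponse have been called as in the tests themselves:

```go
// Local execution state fails with a benign storage error for the queried height...
scriptExecutor.On("GetAccountKeys", mock.Anything, s.account.Address, s.block.Header.Height).
	Return(nil, storage.ErrHeightNotIndexed).Once()

// ...so in failover mode the backend is expected to answer from the mocked execution node.
backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor)

keys, err := backend.GetAccountKeysAtBlockHeight(ctx, s.account.Address, s.block.Header.Height)
s.Require().NoError(err)
s.Require().Equal(s.account.Keys, keys)
```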
@@ -557,18 +473,14 @@ func (s *BackendAccountsSuite) TestGetAccountKeysFromFailover_HappyPath() { // TestGetAccountKeyFromFailover_HappyPath tests that when an error is returned getting account key by key index // from local storage, the backend will attempt to get the account key from an execution node -func (s *BackendAccountsSuite) TestGetAccountKeyFromFailover_HappyPath() { +func (s *AccountsSuite) TestGetAccountKeyFromFailover_HappyPath() { ctx := context.Background() s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor - + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) var keyIndex uint32 = 0 for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} { @@ -587,7 +499,7 @@ func (s *BackendAccountsSuite) TestGetAccountKeyFromFailover_HappyPath() { } } -func (s *BackendAccountsSuite) testGetAccount(ctx context.Context, backend *backendAccounts, statusCode codes.Code) { +func (s *AccountsSuite) testGetAccount(ctx context.Context, backend *Accounts, statusCode codes.Code) { s.state.On("Sealed").Return(s.snapshot, nil).Once() s.snapshot.On("Head").Return(s.block.Header, nil).Once() @@ -603,7 +515,7 @@ func (s *BackendAccountsSuite) testGetAccount(ctx context.Context, backend *back } } -func (s *BackendAccountsSuite) testGetAccountAtLatestBlock(ctx context.Context, backend *backendAccounts, statusCode codes.Code) { +func (s *AccountsSuite) testGetAccountAtLatestBlock(ctx context.Context, backend *Accounts, statusCode codes.Code) { s.state.On("Sealed").Return(s.snapshot, nil).Once() s.snapshot.On("Head").Return(s.block.Header, nil).Once() @@ -619,7 +531,7 @@ func (s *BackendAccountsSuite) testGetAccountAtLatestBlock(ctx context.Context, } } -func (s *BackendAccountsSuite) testGetAccountAtBlockHeight(ctx context.Context, backend *backendAccounts, statusCode codes.Code) { +func (s *AccountsSuite) testGetAccountAtBlockHeight(ctx context.Context, backend *Accounts, statusCode codes.Code) { height := s.block.Header.Height s.headers.On("BlockIDByHeight", height).Return(s.block.Header.ID(), nil).Once() @@ -635,7 +547,7 @@ func (s *BackendAccountsSuite) testGetAccountAtBlockHeight(ctx context.Context, } } -func (s *BackendAccountsSuite) testGetAccountBalanceAtLatestBlock(ctx context.Context, backend *backendAccounts) { +func (s *AccountsSuite) testGetAccountBalanceAtLatestBlock(ctx context.Context, backend *Accounts) { s.state.On("Sealed").Return(s.snapshot, nil).Once() s.snapshot.On("Head").Return(s.block.Header, nil).Once() @@ -644,13 +556,13 @@ func (s *BackendAccountsSuite) testGetAccountBalanceAtLatestBlock(ctx context.Co s.Require().Equal(s.account.Balance, actual) } -func (s *BackendAccountsSuite) testGetAccountBalanceAtBlockHeight(ctx context.Context, backend *backendAccounts) { +func (s *AccountsSuite) testGetAccountBalanceAtBlockHeight(ctx context.Context, backend *Accounts) { actual, err := backend.GetAccountBalanceAtBlockHeight(ctx, s.account.Address, s.block.Header.Height) s.Require().NoError(err) s.Require().Equal(s.account.Balance, actual) } -func (s *BackendAccountsSuite) testGetAccountKeysAtLatestBlock(ctx context.Context, backend *backendAccounts) { +func (s *AccountsSuite) testGetAccountKeysAtLatestBlock(ctx context.Context, backend *Accounts) { s.state.On("Sealed").Return(s.snapshot, nil).Once() 
s.snapshot.On("Head").Return(s.block.Header, nil).Once() @@ -659,7 +571,7 @@ func (s *BackendAccountsSuite) testGetAccountKeysAtLatestBlock(ctx context.Conte s.Require().Equal(s.account.Keys, actual) } -func (s *BackendAccountsSuite) testGetAccountKeyAtLatestBlock(ctx context.Context, backend *backendAccounts, keyIndex uint32) { +func (s *AccountsSuite) testGetAccountKeyAtLatestBlock(ctx context.Context, backend *Accounts, keyIndex uint32) { s.state.On("Sealed").Return(s.snapshot, nil).Once() s.snapshot.On("Head").Return(s.block.Header, nil).Once() @@ -669,14 +581,13 @@ func (s *BackendAccountsSuite) testGetAccountKeyAtLatestBlock(ctx context.Contex s.Require().Equal(expectedKeyByIndex, actual) } -func (s *BackendAccountsSuite) testGetAccountKeysAtBlockHeight(ctx context.Context, backend *backendAccounts) { +func (s *AccountsSuite) testGetAccountKeysAtBlockHeight(ctx context.Context, backend *Accounts) { actual, err := backend.GetAccountKeysAtBlockHeight(ctx, s.account.Address, s.block.Header.Height) s.Require().NoError(err) s.Require().Equal(s.account.Keys, actual) } -func (s *BackendAccountsSuite) testGetAccountKeyAtBlockHeight(ctx context.Context, backend *backendAccounts, keyIndex uint32) { - +func (s *AccountsSuite) testGetAccountKeyAtBlockHeight(ctx context.Context, backend *Accounts, keyIndex uint32) { actual, err := backend.GetAccountKeyAtBlockHeight(ctx, s.account.Address, keyIndex, s.block.Header.Height) expectedKeyByIndex := findAccountKeyByIndex(s.account.Keys, keyIndex) s.Require().NoError(err) @@ -691,3 +602,69 @@ func findAccountKeyByIndex(keys []flow.AccountPublicKey, keyIndex uint32) *flow. } return &flow.AccountPublicKey{} } + +func (s *AccountsSuite) defaultAccountsBackend(mode query_mode.IndexQueryMode, executor *execmock.ScriptExecutor) *Accounts { + accounts, err := NewAccountsBackend( + s.log, + s.state, + s.headers, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + mode, + executor, + commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + flow.IdentifierList{}, + flow.IdentifierList{}, + ), + ) + require.NoError(s.T(), err) + + return accounts +} + +// setupExecutionNodes sets up the mocks required to test against an EN backend +func (s *AccountsSuite) setupExecutionNodes(block *flow.Block) { + s.params.On("FinalizedRoot").Return(s.rootHeader, nil) + s.state.On("Params").Return(s.params) + s.state.On("Final").Return(s.snapshot) + s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) + + // this line causes a S1021 lint error because receipts is explicitly declared. this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + + s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil) +} + +// setupENSuccessResponse configures the execution node client to return a successful response +func (s *AccountsSuite) setupENSuccessResponse(blockID flow.Identifier) { + expectedExecRequest := &execproto.GetAccountAtBlockIDRequest{ + BlockId: blockID[:], + Address: s.account.Address.Bytes(), + } + + convertedAccount, err := convert.AccountToMessage(s.account) + s.Require().NoError(err) + + s.execClient.On("GetAccountAtBlockID", mock.Anything, expectedExecRequest). 
+ Return(&execproto.GetAccountAtBlockIDResponse{ + Account: convertedAccount, + }, nil) +} + +// setupENFailingResponse configures the execution node client to return an error +func (s *AccountsSuite) setupENFailingResponse(blockID flow.Identifier, err error) { + failingRequest := &execproto.GetAccountAtBlockIDRequest{ + BlockId: blockID[:], + Address: s.failingAddress.Bytes(), + } + + s.execClient.On("GetAccountAtBlockID", mock.Anything, failingRequest). + Return(nil, err) +} diff --git a/engine/access/rpc/backend/accounts/provider/comparing.go b/engine/access/rpc/backend/accounts/provider/comparing.go new file mode 100644 index 00000000000..011a1a38713 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/comparing.go @@ -0,0 +1,175 @@ +package provider + +import ( + "bytes" + "context" + "errors" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type ComparingAccountProvider struct { + FailoverAccountProvider +} + +var _ AccountProvider = (*ComparingAccountProvider)(nil) + +func NewComparingAccountProvider( + log zerolog.Logger, + state protocol.State, + localRequester AccountProvider, + execNodeRequester AccountProvider, +) *ComparingAccountProvider { + return &ComparingAccountProvider{ + FailoverAccountProvider: FailoverAccountProvider{ + log: log.With().Str("account_provider", "comparing").Logger(), + state: state, + localRequester: localRequester, + execNodeRequester: execNodeRequester, + }, + } +} + +func (c *ComparingAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (*flow.Account, error) { + localAccount, localErr := c.localRequester.GetAccountAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localAccount, nil + } + + execNodeAccount, execNodeErr := c.execNodeRequester.GetAccountAtBlock(ctx, address, blockID, height) + c.compareAccountResults(execNodeAccount, execNodeErr, localAccount, localErr, blockID, address) + + return execNodeAccount, execNodeErr +} + +// compareAccountResults compares the result and error returned from local and remote getAccount calls +// and logs the results if they are different +func (c *ComparingAccountProvider) compareAccountResults( + execNodeResult *flow.Account, + execErr error, + localResult *flow.Account, + localErr error, + blockID flow.Identifier, + address flow.Address, +) { + if c.log.GetLevel() > zerolog.DebugLevel { + return + } + + lgCtx := c.log.With(). + Hex("block_id", blockID[:]). + Str("address", address.String()) + + // errors are different + if !errors.Is(execErr, localErr) { + lgCtx = lgCtx. + AnErr("execution_node_error", execErr). + AnErr("local_error", localErr) + + lg := lgCtx.Logger() + lg.Debug().Msg("errors from getting account on local and EN do not match") + return + } + + // both errors are nil, compare the accounts + if execErr == nil { + lgCtx, ok := compareAccountsLogger(execNodeResult, localResult, lgCtx) + if !ok { + lg := lgCtx.Logger() + lg.Debug().Msg("accounts from local and EN do not match") + } + } +} + +// compareAccountsLogger compares accounts produced by the execution node and local storage and +// return a logger configured to log the differences +func compareAccountsLogger(exec, local *flow.Account, lgCtx zerolog.Context) (zerolog.Context, bool) { + different := false + + if exec.Address != local.Address { + lgCtx = lgCtx. + Str("exec_node_address", exec.Address.String()). 
+ Str("local_address", local.Address.String()) + different = true + } + + if exec.Balance != local.Balance { + lgCtx = lgCtx. + Uint64("exec_node_balance", exec.Balance). + Uint64("local_balance", local.Balance) + different = true + } + + contractListMatches := true + if len(exec.Contracts) != len(local.Contracts) { + lgCtx = lgCtx. + Int("exec_node_contract_count", len(exec.Contracts)). + Int("local_contract_count", len(local.Contracts)) + contractListMatches = false + different = true + } + + missingContracts := zerolog.Arr() + mismatchContracts := zerolog.Arr() + + for name, execContract := range exec.Contracts { + localContract, ok := local.Contracts[name] + + if !ok { + missingContracts.Str(name) + contractListMatches = false + different = true + } + + if !bytes.Equal(execContract, localContract) { + mismatchContracts.Str(name) + different = true + } + } + + lgCtx = lgCtx. + Array("missing_contracts", missingContracts). + Array("mismatch_contracts", mismatchContracts) + + // only check if there were any missing + if !contractListMatches { + extraContracts := zerolog.Arr() + for name := range local.Contracts { + if _, ok := exec.Contracts[name]; !ok { + extraContracts.Str(name) + different = true + } + } + lgCtx = lgCtx.Array("extra_contracts", extraContracts) + } + + if len(exec.Keys) != len(local.Keys) { + lgCtx = lgCtx. + Int("exec_node_key_count", len(exec.Keys)). + Int("local_key_count", len(local.Keys)) + different = true + } + + mismatchKeys := zerolog.Arr() + + for i, execKey := range exec.Keys { + localKey := local.Keys[i] + + if !execKey.PublicKey.Equals(localKey.PublicKey) { + mismatchKeys.Uint32(execKey.Index) + different = true + } + } + + lgCtx = lgCtx.Array("mismatch_keys", mismatchKeys) + + return lgCtx, !different +} diff --git a/engine/access/rpc/backend/accounts/provider/execution_node.go b/engine/access/rpc/backend/accounts/provider/execution_node.go new file mode 100644 index 00000000000..f04be98de6d --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/execution_node.go @@ -0,0 +1,170 @@ +package provider + +import ( + "context" + "time" + + "github.com/rs/zerolog" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type ENAccountProvider struct { + log zerolog.Logger + state protocol.State + connFactory connection.ConnectionFactory + nodeCommunicator node_communicator.Communicator + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider +} + +var _ AccountProvider = (*ENAccountProvider)(nil) + +func NewENAccountProvider( + log zerolog.Logger, + state protocol.State, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + execNodeIdentityProvider *rpc.ExecutionNodeIdentitiesProvider, +) *ENAccountProvider { + return &ENAccountProvider{ + log: log.With().Str("account_provider", "execution_node").Logger(), + state: state, + connFactory: connFactory, + nodeCommunicator: nodeCommunicator, + execNodeIdentitiesProvider: execNodeIdentityProvider, + } +} + +func (e *ENAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + _ uint64, +) (*flow.Account, 
error) { + req := &execproto.GetAccountAtBlockIDRequest{ + Address: address.Bytes(), + BlockId: blockID[:], + } + + execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to find execution node to query", codes.Internal) + } + + var resp *execproto.GetAccountAtBlockIDResponse + errToReturn := e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + start := time.Now() + + resp, err = e.tryGetAccount(ctx, node, req) + duration := time.Since(start) + + lg := e.log.With(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Hex("address", req.GetAddress()). + Int64("rtt_ms", duration.Milliseconds()). + Logger() + + if err != nil { + lg.Err(err).Msg("failed to execute GetAccount") + return err + } + + // return if any execution node replied successfully + lg.Debug().Msg("Successfully got account info") + return nil + }, + nil, + ) + + if errToReturn != nil { + return nil, rpc.ConvertError(errToReturn, "failed to get account from the execution node", codes.Internal) + } + + account, err := convert.MessageToAccount(resp.GetAccount()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert account message: %v", err) + } + + return account, nil +} + +func (e *ENAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return 0, err + } + + return account.Balance, nil +} + +func (e *ENAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + blockID flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return nil, err + } + + for _, key := range account.Keys { + if key.Index == keyIndex { + return &key, nil + } + } + + return nil, status.Errorf(codes.NotFound, "failed to get account key by index: %d", keyIndex) +} + +func (e *ENAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return nil, err + } + + return account.Keys, nil +} + +// tryGetAccount attempts to get the account from the given execution node. 
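+// It obtains an execution API client for the node's address from the connection factory,
+// issues GetAccountAtBlockID, and closes the client before returning. Errors are returned
+// unchanged; the caller logs them and the node communicator moves on to the next available execution node.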
+func (e *ENAccountProvider) tryGetAccount( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetAccountAtBlockIDRequest, +) (*execproto.GetAccountAtBlockIDResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + return execRPCClient.GetAccountAtBlockID(ctx, req) +} diff --git a/engine/access/rpc/backend/accounts/provider/failover.go b/engine/access/rpc/backend/accounts/provider/failover.go new file mode 100644 index 00000000000..d9a942133af --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/failover.go @@ -0,0 +1,106 @@ +package provider + +import ( + "context" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type FailoverAccountProvider struct { + log zerolog.Logger + state protocol.State + localRequester AccountProvider + execNodeRequester AccountProvider +} + +var _ AccountProvider = (*FailoverAccountProvider)(nil) + +func NewFailoverAccountProvider( + log zerolog.Logger, + state protocol.State, + localRequester AccountProvider, + execNodeRequester AccountProvider, +) *FailoverAccountProvider { + return &FailoverAccountProvider{ + log: log.With().Str("account_provider", "failover").Logger(), + state: state, + localRequester: localRequester, + execNodeRequester: execNodeRequester, + } +} + +func (f *FailoverAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (*flow.Account, error) { + localAccount, localErr := f.localRequester.GetAccountAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localAccount, nil + } + + execNodeAccount, execNodeErr := f.execNodeRequester.GetAccountAtBlock(ctx, address, blockID, height) + return execNodeAccount, execNodeErr +} + +func (f *FailoverAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + localBalance, localErr := f.localRequester.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localBalance, nil + } + + execNodeBalance, execNodeErr := f.execNodeRequester.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if execNodeErr != nil { + return 0, execNodeErr + } + + return execNodeBalance, nil +} + +func (f *FailoverAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + blockID flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + localKey, localErr := f.localRequester.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if localErr == nil { + return localKey, nil + } + + execNodeKey, execNodeErr := f.execNodeRequester.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if execNodeErr != nil { + return nil, execNodeErr + } + + return execNodeKey, nil +} + +func (f *FailoverAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + localKeys, localErr := f.localRequester.GetAccountKeysAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localKeys, nil + } + + execNodeKeys, execNodeErr := f.execNodeRequester.GetAccountKeysAtBlock(ctx, address, blockID, height) + if execNodeErr != nil { + return nil, execNodeErr + } + + return execNodeKeys, nil +} diff --git 
a/engine/access/rpc/backend/accounts/provider/local.go b/engine/access/rpc/backend/accounts/provider/local.go new file mode 100644 index 00000000000..5fe943e2261 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/local.go @@ -0,0 +1,114 @@ +package provider + +import ( + "context" + "errors" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/common/rpc" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type LocalAccountProvider struct { + log zerolog.Logger + state protocol.State + scriptExecutor execution.ScriptExecutor +} + +var _ AccountProvider = (*LocalAccountProvider)(nil) + +func NewLocalAccountProvider( + log zerolog.Logger, + state protocol.State, + scriptExecutor execution.ScriptExecutor, +) *LocalAccountProvider { + return &LocalAccountProvider{ + log: log.With().Str("account_provider", "local").Logger(), + state: state, + scriptExecutor: scriptExecutor, + } +} + +func (l *LocalAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + _ flow.Identifier, + height uint64, +) (*flow.Account, error) { + account, err := l.scriptExecutor.GetAccountAtBlockHeight(ctx, address, height) + if err != nil { + return nil, convertAccountError(common.ResolveHeightError(l.state.Params(), height, err), address, height) + } + return account, nil +} + +func (l *LocalAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + accountBalance, err := l.scriptExecutor.GetAccountBalance(ctx, address, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", blockID) + return 0, err + } + + return accountBalance, nil +} + +func (l *LocalAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + _ flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + accountKey, err := l.scriptExecutor.GetAccountKey(ctx, address, keyIndex, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account key at height: %d", height) + return nil, err + } + + return accountKey, nil +} + +func (l *LocalAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + _ flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + accountKeys, err := l.scriptExecutor.GetAccountKeys(ctx, address, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account keys at height: %d", height) + return nil, err + } + + return accountKeys, nil +} + +// convertAccountError converts the script execution error to a gRPC error +func convertAccountError(err error, address flow.Address, height uint64) error { + if err == nil { + return nil + } + + if errors.Is(err, storage.ErrNotFound) { + return status.Errorf(codes.NotFound, "account with address %s not found: %v", address, err) + } + + if fvmerrors.IsAccountNotFoundError(err) { + return status.Errorf(codes.NotFound, "account not found") + } + + return rpc.ConvertIndexError(err, height, "failed to get account") +} diff --git a/engine/access/rpc/backend/accounts/provider/mock/account_provider.go b/engine/access/rpc/backend/accounts/provider/mock/account_provider.go 
new file mode 100644 index 00000000000..67b36f8e722 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/mock/account_provider.go @@ -0,0 +1,147 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// AccountProvider is an autogenerated mock type for the AccountProvider type +type AccountProvider struct { + mock.Mock +} + +// GetAccountAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (*flow.Account, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlock") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) (*flow.Account, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) *flow.Account); ok { + r0 = rf(ctx, address, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountBalanceAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) (uint64, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) uint64); ok { + r0 = rf(ctx, address, blockID, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlock provides a mock function with given fields: ctx, address, keyIndex, blockID, height +func (_m *AccountProvider) GetAccountKeyAtBlock(ctx context.Context, address flow.Address, keyIndex uint32, blockID flow.Identifier, height uint64) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlock") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, 
address, keyIndex, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountKeysAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlock") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAccountProvider creates a new instance of AccountProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *AccountProvider { + mock := &AccountProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/accounts/provider/provider.go b/engine/access/rpc/backend/accounts/provider/provider.go new file mode 100644 index 00000000000..36818399fae --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/provider.go @@ -0,0 +1,14 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow-go/model/flow" +) + +type AccountProvider interface { + GetAccountAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (*flow.Account, error) + GetAccountBalanceAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (uint64, error) + GetAccountKeyAtBlock(ctx context.Context, address flow.Address, keyIndex uint32, blockID flow.Identifier, height uint64) (*flow.AccountPublicKey, error) + GetAccountKeysAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) ([]flow.AccountPublicKey, error) +} diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 2cff4733d4d..56af2849eb3 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -3,7 +3,6 @@ package backend import ( "context" "crypto/md5" //nolint:gosec - "errors" "fmt" "time" @@ -11,9 +10,21 @@ import ( accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/accounts" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + 
"github.com/onflow/flow-go/engine/access/rpc/backend/scripts" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + txstream "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/stream" "github.com/onflow/flow-go/engine/access/rpc/connection" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/access/subscription/tracker" @@ -31,17 +42,10 @@ import ( "github.com/onflow/flow-go/storage" ) -// DefaultMaxHeightRange is the default maximum size of range requests. -const DefaultMaxHeightRange = 250 - // DefaultSnapshotHistoryLimit the amount of blocks to look back in state // when recursively searching for a valid snapshot const DefaultSnapshotHistoryLimit = 500 -// DefaultLoggedScriptsCacheSize is the default size of the lookup cache used to dedupe logs of scripts sent to ENs -// limiting cache size to 16MB and does not affect script execution, only for keeping logs tidy -const DefaultLoggedScriptsCacheSize = 1_000_000 - // DefaultConnectionPoolSize is the default size for the connection pool to collection and execution nodes const DefaultConnectionPoolSize = 250 @@ -58,26 +62,25 @@ const DefaultConnectionPoolSize = 250 // // All remaining calls are handled by the base Backend in this file. type Backend struct { - backendScripts - backendTransactions - backendEvents + accounts.Accounts + events.Events + scripts.Scripts + transactions.Transactions + txstream.TransactionStream backendBlockHeaders backendBlockDetails - backendAccounts backendExecutionResults backendNetwork backendSubscribeBlocks - backendSubscribeTransactions - state protocol.State - chainID flow.ChainID - collections storage.Collections - executionReceipts storage.ExecutionReceipts - connFactory connection.ConnectionFactory + state protocol.State + collections storage.Collections + staticCollectionRPC accessproto.AccessAPIClient - BlockTracker tracker.BlockTracker stateParams protocol.Params versionControl *version.VersionControl + + BlockTracker tracker.BlockTracker } type Params struct { @@ -98,34 +101,30 @@ type Params struct { MaxHeightRange uint Log zerolog.Logger SnapshotHistoryLimit int - Communicator Communicator + Communicator node_communicator.Communicator TxResultCacheSize uint ScriptExecutor execution.ScriptExecutor - ScriptExecutionMode IndexQueryMode + ScriptExecutionMode query_mode.IndexQueryMode CheckPayerBalanceMode validator.PayerBalanceMode - EventQueryMode IndexQueryMode + EventQueryMode query_mode.IndexQueryMode BlockTracker tracker.BlockTracker SubscriptionHandler *subscription.SubscriptionHandler EventsIndex *index.EventsIndex - TxResultQueryMode IndexQueryMode + TxResultQueryMode query_mode.IndexQueryMode TxResultsIndex *index.TransactionResultsIndex LastFullBlockHeight *counters.PersistentStrictMonotonicCounter IndexReporter state_synchronization.IndexReporter VersionControl *version.VersionControl ExecNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider + TxErrorMessageProvider error_messages.Provider } -var _ TransactionErrorMessage = (*Backend)(nil) +var _ access.API = (*Backend)(nil) // New creates backend instance func New(params Params) (*Backend, error) { - retry := newRetry(params.Log) - if params.RetryEnabled { - retry.Activate() - } - - loggedScripts, err := lru.New[[md5.Size]byte, 
time.Time](DefaultLoggedScriptsCacheSize) + loggedScripts, err := lru.New[[md5.Size]byte, time.Time](common.DefaultLoggedScriptsCacheSize) if err != nil { return nil, fmt.Errorf("failed to initialize script logging cache: %w", err) } @@ -145,51 +144,170 @@ func New(params Params) (*Backend, error) { } systemTxID := systemTx.ID() - b := &Backend{ - state: params.State, - BlockTracker: params.BlockTracker, - // create the sub-backends - backendScripts: backendScripts{ - log: params.Log, - headers: params.Headers, - connFactory: params.ConnFactory, - state: params.State, - metrics: params.AccessMetrics, - loggedScripts: loggedScripts, - nodeCommunicator: params.Communicator, - scriptExecutor: params.ScriptExecutor, - scriptExecMode: params.ScriptExecutionMode, - execNodeIdentitiesProvider: params.ExecNodeIdentitiesProvider, - }, - backendEvents: backendEvents{ - log: params.Log, - chain: params.ChainID.Chain(), - state: params.State, - headers: params.Headers, - connFactory: params.ConnFactory, - maxHeightRange: params.MaxHeightRange, - nodeCommunicator: params.Communicator, - queryMode: params.EventQueryMode, - eventsIndex: params.EventsIndex, - execNodeIdentitiesProvider: params.ExecNodeIdentitiesProvider, + accountsBackend, err := accounts.NewAccountsBackend( + params.Log, + params.State, + params.Headers, + params.ConnFactory, + params.Communicator, + params.ScriptExecutionMode, + params.ScriptExecutor, + params.ExecNodeIdentitiesProvider, + ) + if err != nil { + return nil, fmt.Errorf("failed to create accounts: %w", err) + } + + eventsBackend, err := events.NewEventsBackend( + params.Log, + params.State, + params.ChainID.Chain(), + params.MaxHeightRange, + params.Headers, + params.ConnFactory, + params.Communicator, + params.EventQueryMode, + params.EventsIndex, + params.ExecNodeIdentitiesProvider, + ) + if err != nil { + return nil, fmt.Errorf("failed to create events: %w", err) + } + + scriptsBackend, err := scripts.NewScriptsBackend( + params.Log, + params.AccessMetrics, + params.Headers, + params.State, + params.ConnFactory, + params.Communicator, + params.ScriptExecutor, + params.ScriptExecutionMode, + params.ExecNodeIdentitiesProvider, + loggedScripts, + ) + if err != nil { + return nil, fmt.Errorf("failed to create scripts: %w", err) + } + + txValidator, err := validator.NewTransactionValidator( + validator.NewProtocolStateBlocks(params.State, params.IndexReporter), + params.ChainID.Chain(), + params.AccessMetrics, + validator.TransactionValidationOptions{ + Expiry: flow.DefaultTransactionExpiry, + ExpiryBuffer: flow.DefaultTransactionExpiryBuffer, + AllowEmptyReferenceBlockID: false, + AllowUnknownReferenceBlockID: false, + CheckScriptsParse: false, + MaxGasLimit: flow.DefaultMaxTransactionGasLimit, + MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, + MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, + CheckPayerBalanceMode: params.CheckPayerBalanceMode, }, + params.ScriptExecutor, + ) + if err != nil { + return nil, fmt.Errorf("could not create transaction validator: %w", err) + } + + txStatusDeriver := status.NewTxStatusDeriver(params.State, params.LastFullBlockHeight) + + localTxProvider := provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + systemTxID, + txStatusDeriver, + ) + execNodeTxProvider := provider.NewENTransactionProvider( + params.Log, + params.State, + params.Collections, + params.ConnFactory, + params.Communicator, + 
params.ExecNodeIdentitiesProvider, + txStatusDeriver, + systemTxID, + systemTx, + ) + failoverTxProvider := provider.NewFailoverTransactionProvider(localTxProvider, execNodeTxProvider) + + txParams := transactions.Params{ + Log: params.Log, + Metrics: params.AccessMetrics, + State: params.State, + ChainID: params.ChainID, + SystemTx: systemTx, + SystemTxID: systemTxID, + StaticCollectionRPCClient: params.CollectionRPC, + HistoricalAccessNodeClients: params.HistoricalAccessNodes, + NodeCommunicator: params.Communicator, + ConnFactory: params.ConnFactory, + EnableRetries: params.RetryEnabled, + NodeProvider: params.ExecNodeIdentitiesProvider, + Blocks: params.Blocks, + Collections: params.Collections, + Transactions: params.Transactions, + TxErrorMessageProvider: params.TxErrorMessageProvider, + TxResultCache: txResCache, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: params.EventsIndex, + TxResultsIndex: params.TxResultsIndex, + } + + switch params.TxResultQueryMode { + case query_mode.IndexQueryModeLocalOnly: + txParams.TxProvider = localTxProvider + case query_mode.IndexQueryModeExecutionNodesOnly: + txParams.TxProvider = execNodeTxProvider + case query_mode.IndexQueryModeFailover: + txParams.TxProvider = failoverTxProvider + default: + return nil, fmt.Errorf("invalid tx result query mode: %s", params.TxResultQueryMode) + } + + txBackend, err := transactions.NewTransactionsBackend(txParams) + if err != nil { + return nil, fmt.Errorf("failed to create transactions backend: %w", err) + } + + txStreamBackend := txstream.NewTransactionStreamBackend( + params.Log, + params.State, + params.SubscriptionHandler, + params.BlockTracker, + txBackend.SendTransaction, + params.Blocks, + params.Collections, + params.Transactions, + failoverTxProvider, + txStatusDeriver, + ) + + b := &Backend{ + Accounts: *accountsBackend, + Events: *eventsBackend, + Scripts: *scriptsBackend, + Transactions: *txBackend, + TransactionStream: *txStreamBackend, backendBlockHeaders: backendBlockHeaders{ - headers: params.Headers, - state: params.State, + backendBlockBase: backendBlockBase{ + blocks: params.Blocks, + headers: params.Headers, + state: params.State, + }, }, backendBlockDetails: backendBlockDetails{ - blocks: params.Blocks, - state: params.State, - }, - backendAccounts: backendAccounts{ - log: params.Log, - state: params.State, - headers: params.Headers, - connFactory: params.ConnFactory, - nodeCommunicator: params.Communicator, - scriptExecutor: params.ScriptExecutor, - scriptExecMode: params.ScriptExecutionMode, - execNodeIdentitiesProvider: params.ExecNodeIdentitiesProvider, + backendBlockBase: backendBlockBase{ + blocks: params.Blocks, + headers: params.Headers, + state: params.State, + }, }, backendExecutionResults: backendExecutionResults{ executionResults: params.ExecutionResults, @@ -209,90 +327,17 @@ func New(params Params) (*Backend, error) { blockTracker: params.BlockTracker, }, - collections: params.Collections, - executionReceipts: params.ExecutionReceipts, - connFactory: params.ConnFactory, - chainID: params.ChainID, - stateParams: params.State.Params(), - versionControl: params.VersionControl, - } - - txValidator, err := configureTransactionValidator(params.State, params.ChainID, params.IndexReporter, params.AccessMetrics, params.ScriptExecutor, params.CheckPayerBalanceMode) - if err != nil { - return nil, fmt.Errorf("could not create transaction validator: %w", err) - } - - b.backendTransactions = backendTransactions{ - TransactionsLocalDataProvider: 
&TransactionsLocalDataProvider{ - state: params.State, - collections: params.Collections, - blocks: params.Blocks, - eventsIndex: params.EventsIndex, - txResultsIndex: params.TxResultsIndex, - systemTxID: systemTxID, - lastFullBlockHeight: params.LastFullBlockHeight, - }, - log: params.Log, - staticCollectionRPC: params.CollectionRPC, - chainID: params.ChainID, - transactions: params.Transactions, - txResultErrorMessages: params.TxResultErrorMessages, - transactionValidator: txValidator, - transactionMetrics: params.AccessMetrics, - retry: retry, - connFactory: params.ConnFactory, - previousAccessNodes: params.HistoricalAccessNodes, - nodeCommunicator: params.Communicator, - txResultCache: txResCache, - txResultQueryMode: params.TxResultQueryMode, - systemTx: systemTx, - systemTxID: systemTxID, - execNodeIdentitiesProvider: params.ExecNodeIdentitiesProvider, - } - - // TODO: The TransactionErrorMessage interface should be reorganized in future, as it is implemented in backendTransactions but used in TransactionsLocalDataProvider, and its initialization is somewhat quirky. - b.backendTransactions.txErrorMessages = b - - b.backendSubscribeTransactions = backendSubscribeTransactions{ - backendTransactions: &b.backendTransactions, - log: params.Log, - subscriptionHandler: params.SubscriptionHandler, - blockTracker: params.BlockTracker, - sendTransaction: b.SendTransaction, + state: params.State, + collections: params.Collections, + staticCollectionRPC: params.CollectionRPC, + stateParams: params.State.Params(), + versionControl: params.VersionControl, + BlockTracker: params.BlockTracker, } - retry.SetBackend(b) - return b, nil } -func configureTransactionValidator( - state protocol.State, - chainID flow.ChainID, - indexReporter state_synchronization.IndexReporter, - transactionMetrics module.TransactionValidationMetrics, - executor execution.ScriptExecutor, - checkPayerBalanceMode validator.PayerBalanceMode, -) (*validator.TransactionValidator, error) { - return validator.NewTransactionValidator( - validator.NewProtocolStateBlocks(state, indexReporter), - chainID.Chain(), - transactionMetrics, - validator.TransactionValidationOptions{ - Expiry: flow.DefaultTransactionExpiry, - ExpiryBuffer: flow.DefaultTransactionExpiryBuffer, - AllowEmptyReferenceBlockID: false, - AllowUnknownReferenceBlockID: false, - CheckScriptsParse: false, - MaxGasLimit: flow.DefaultMaxTransactionGasLimit, - MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, - MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, - CheckPayerBalanceMode: checkPayerBalanceMode, - }, - executor, - ) -} - // Ping responds to requests when the server is up. func (b *Backend) Ping(ctx context.Context) error { // staticCollectionRPC is only set if a collection node address was provided at startup @@ -372,47 +417,6 @@ func (b *Backend) GetFullCollectionByID(_ context.Context, colID flow.Identifier func (b *Backend) GetNetworkParameters(_ context.Context) accessmodel.NetworkParameters { return accessmodel.NetworkParameters{ - ChainID: b.chainID, - } -} - -// resolveHeightError processes errors returned during height-based queries. -// If the error is due to a block not being found, this function determines whether the queried -// height falls outside the node's accessible range and provides context-sensitive error messages -// based on spork and node root block heights. -// -// Parameters: -// - stateParams: Protocol parameters that contain spork root and node root block heights. -// - height: The queried block height. 
-// - genericErr: The initial error returned when the block is not found. -// -// Expected errors during normal operation: -// - storage.ErrNotFound - Indicates that the queried block does not exist in the local database. -func resolveHeightError( - stateParams protocol.Params, - height uint64, - genericErr error, -) error { - if !errors.Is(genericErr, storage.ErrNotFound) { - return genericErr - } - - sporkRootBlockHeight := stateParams.SporkRootBlockHeight() - nodeRootBlockHeader := stateParams.SealedRoot().Height - - if height < sporkRootBlockHeight { - return fmt.Errorf("block height %d is less than the spork root block height %d. Try to use a historic node: %w", - height, - sporkRootBlockHeight, - genericErr, - ) - } else if height < nodeRootBlockHeader { - return fmt.Errorf("block height %d is less than the node's root block height %d. Try to use a different Access node: %w", - height, - nodeRootBlockHeader, - genericErr, - ) - } else { - return genericErr + ChainID: b.backendNetwork.chainID, } } diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go deleted file mode 100644 index 609d1c063a8..00000000000 --- a/engine/access/rpc/backend/backend_accounts.go +++ /dev/null @@ -1,626 +0,0 @@ -package backend - -import ( - "bytes" - "context" - "errors" - "time" - - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - - "github.com/onflow/flow-go/engine/access/rpc/connection" - "github.com/onflow/flow-go/engine/common/rpc" - commonrpc "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - fvmerrors "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/execution" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -type backendAccounts struct { - log zerolog.Logger - state protocol.State - headers storage.Headers - connFactory connection.ConnectionFactory - nodeCommunicator Communicator - scriptExecutor execution.ScriptExecutor - scriptExecMode IndexQueryMode - execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider -} - -// GetAccount returns the account details at the latest sealed block. -// Alias for GetAccountAtLatestBlock -func (b *backendAccounts) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { - return b.GetAccountAtLatestBlock(ctx, address) -} - -// GetAccountAtLatestBlock returns the account details at the latest sealed block. -func (b *backendAccounts) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return nil, err - } - - sealedBlockID := sealed.ID() - - account, err := b.getAccountAtBlock(ctx, address, sealedBlockID, sealed.Height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account at blockID: %v", sealedBlockID) - return nil, err - } - - return account, nil -} - -// GetAccountAtBlockHeight returns the account details at the given block height. 
-func (b *backendAccounts) GetAccountAtBlockHeight( - ctx context.Context, - address flow.Address, - height uint64, -) (*flow.Account, error) { - blockID, err := b.headers.BlockIDByHeight(height) - if err != nil { - return nil, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) - } - - account, err := b.getAccountAtBlock(ctx, address, blockID, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account at height: %d", height) - return nil, err - } - - return account, nil -} - -// GetAccountBalanceAtLatestBlock returns the account balance at the latest sealed block. -func (b *backendAccounts) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return 0, err - } - - sealedBlockID := sealed.ID() - - balance, err := b.getAccountBalanceAtBlock(ctx, address, sealedBlockID, sealed.Height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", sealedBlockID) - return 0, err - } - - return balance, nil -} - -// GetAccountBalanceAtBlockHeight returns the account balance at the given block height. -func (b *backendAccounts) GetAccountBalanceAtBlockHeight( - ctx context.Context, - address flow.Address, - height uint64, -) (uint64, error) { - blockID, err := b.headers.BlockIDByHeight(height) - if err != nil { - return 0, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) - } - - balance, err := b.getAccountBalanceAtBlock(ctx, address, blockID, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account balance at height: %v", height) - return 0, err - } - - return balance, nil -} - -// GetAccountKeyAtLatestBlock returns the account public key at the latest sealed block. -func (b *backendAccounts) GetAccountKeyAtLatestBlock( - ctx context.Context, - address flow.Address, - keyIndex uint32, -) (*flow.AccountPublicKey, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return nil, err - } - - sealedBlockID := sealed.ID() - - accountKey, err := b.getAccountKeyAtBlock(ctx, address, keyIndex, sealedBlockID, sealed.Height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account key at blockID: %v", sealedBlockID) - return nil, err - } - - return accountKey, nil -} - -// GetAccountKeysAtLatestBlock returns the account public keys at the latest sealed block. -func (b *backendAccounts) GetAccountKeysAtLatestBlock( - ctx context.Context, - address flow.Address, -) ([]flow.AccountPublicKey, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return nil, err - } - - sealedBlockID := sealed.ID() - accountKeys, err := b.getAccountKeysAtBlock(ctx, address, sealedBlockID, sealed.Height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account keys at blockID: %v", sealedBlockID) - return nil, err - } - - return accountKeys, nil - -} - -// GetAccountKeyAtBlockHeight returns the account public key by key index at the given block height. 
-func (b *backendAccounts) GetAccountKeyAtBlockHeight( - ctx context.Context, - address flow.Address, - keyIndex uint32, - height uint64, -) (*flow.AccountPublicKey, error) { - blockID, err := b.headers.BlockIDByHeight(height) - if err != nil { - return nil, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) - } - - accountKey, err := b.getAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account key at height: %v", height) - return nil, err - } - - return accountKey, nil -} - -// GetAccountKeysAtBlockHeight returns the account public keys at the given block height. -func (b *backendAccounts) GetAccountKeysAtBlockHeight( - ctx context.Context, - address flow.Address, - height uint64, -) ([]flow.AccountPublicKey, error) { - blockID, err := b.headers.BlockIDByHeight(height) - if err != nil { - return nil, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) - } - - accountKeys, err := b.getAccountKeysAtBlock(ctx, address, blockID, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account keys at height: %v", height) - return nil, err - } - - return accountKeys, nil - -} - -// getAccountAtBlock returns the account details at the given block -// -// The data may be sourced from the local storage or from an execution node depending on the nodes's -// configuration and the availability of the data. -func (b *backendAccounts) getAccountAtBlock( - ctx context.Context, - address flow.Address, - blockID flow.Identifier, - height uint64, -) (*flow.Account, error) { - switch b.scriptExecMode { - case IndexQueryModeExecutionNodesOnly: - return b.getAccountFromAnyExeNode(ctx, address, blockID) - - case IndexQueryModeLocalOnly: - return b.getAccountFromLocalStorage(ctx, address, height) - - case IndexQueryModeFailover: - localResult, localErr := b.getAccountFromLocalStorage(ctx, address, height) - if localErr == nil { - return localResult, nil - } - execResult, execErr := b.getAccountFromAnyExeNode(ctx, address, blockID) - - b.compareAccountResults(execResult, execErr, localResult, localErr, blockID, address) - - return execResult, execErr - - case IndexQueryModeCompare: - execResult, execErr := b.getAccountFromAnyExeNode(ctx, address, blockID) - // Only compare actual get account errors from the EN, not system errors - if execErr != nil && !isInvalidArgumentError(execErr) { - return nil, execErr - } - localResult, localErr := b.getAccountFromLocalStorage(ctx, address, height) - - b.compareAccountResults(execResult, execErr, localResult, localErr, blockID, address) - - // always return EN results - return execResult, execErr - - default: - return nil, status.Errorf(codes.Internal, "unknown execution mode: %v", b.scriptExecMode) - } -} - -func (b *backendAccounts) getAccountBalanceAtBlock( - ctx context.Context, - address flow.Address, - blockID flow.Identifier, - height uint64, -) (uint64, error) { - switch b.scriptExecMode { - case IndexQueryModeExecutionNodesOnly: - account, err := b.getAccountFromAnyExeNode(ctx, address, blockID) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", blockID) - return 0, err - } - return account.Balance, nil - - case IndexQueryModeLocalOnly: - accountBalance, err := b.scriptExecutor.GetAccountBalance(ctx, address, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", blockID) - return 0, err - } - - return accountBalance, nil - case 
IndexQueryModeFailover: - localAccountBalance, localErr := b.scriptExecutor.GetAccountBalance(ctx, address, height) - if localErr == nil { - return localAccountBalance, nil - } - execResult, execErr := b.getAccountFromAnyExeNode(ctx, address, blockID) - if execErr != nil { - return 0, execErr - } - - return execResult.Balance, nil - - default: - return 0, status.Errorf(codes.Internal, "unknown execution mode: %v", b.scriptExecMode) - } -} - -func (b *backendAccounts) getAccountKeysAtBlock( - ctx context.Context, - address flow.Address, - blockID flow.Identifier, - height uint64, -) ([]flow.AccountPublicKey, error) { - switch b.scriptExecMode { - case IndexQueryModeExecutionNodesOnly: - account, err := b.getAccountFromAnyExeNode(ctx, address, blockID) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account keys at blockID: %v", blockID) - return nil, err - } - return account.Keys, nil - case IndexQueryModeLocalOnly: - accountKeys, err := b.scriptExecutor.GetAccountKeys(ctx, address, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account keys at height: %d", height) - return nil, err - } - - return accountKeys, nil - case IndexQueryModeFailover: - localAccountKeys, localErr := b.scriptExecutor.GetAccountKeys(ctx, address, height) - if localErr == nil { - return localAccountKeys, nil - } - - account, err := b.getAccountFromAnyExeNode(ctx, address, blockID) - if err != nil { - return nil, err - } - - return account.Keys, nil - - default: - return nil, status.Errorf(codes.Internal, "unknown execution mode: %v", b.scriptExecMode) - } - -} - -func (b *backendAccounts) getAccountKeyAtBlock( - ctx context.Context, - address flow.Address, - keyIndex uint32, - blockID flow.Identifier, - height uint64, -) (*flow.AccountPublicKey, error) { - switch b.scriptExecMode { - case IndexQueryModeExecutionNodesOnly: - account, err := b.getAccountFromAnyExeNode(ctx, address, blockID) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account key at blockID: %v", blockID) - return nil, err - } - - for _, key := range account.Keys { - if key.Index == keyIndex { - return &key, nil - } - } - - return nil, status.Errorf(codes.NotFound, "failed to get account key by index: %d", keyIndex) - case IndexQueryModeLocalOnly: - accountKey, err := b.scriptExecutor.GetAccountKey(ctx, address, keyIndex, height) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account key at height: %d", height) - return nil, err - } - - return accountKey, nil - case IndexQueryModeFailover: - localAccountKey, localErr := b.scriptExecutor.GetAccountKey(ctx, address, keyIndex, height) - if localErr == nil { - return localAccountKey, nil - } - - account, err := b.getAccountFromAnyExeNode(ctx, address, blockID) - if err != nil { - b.log.Debug().Err(err).Msgf("failed to get account key at blockID: %v", blockID) - return nil, err - } - - for _, key := range account.Keys { - if key.Index == keyIndex { - return &key, nil - } - } - - return nil, status.Errorf(codes.NotFound, "failed to get account key by index: %d", keyIndex) - - default: - return nil, status.Errorf(codes.Internal, "unknown execution mode: %v", b.scriptExecMode) - } -} - -// getAccountFromLocalStorage retrieves the given account from the local storage. 
-func (b *backendAccounts) getAccountFromLocalStorage( - ctx context.Context, - address flow.Address, - height uint64, -) (*flow.Account, error) { - // make sure data is available for the requested block - account, err := b.scriptExecutor.GetAccountAtBlockHeight(ctx, address, height) - if err != nil { - return nil, convertAccountError(resolveHeightError(b.state.Params(), height, err), address, height) - } - return account, nil -} - -// getAccountFromAnyExeNode retrieves the given account from any EN in `execNodes`. -// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from -// other ENs are logged and swallowed. If all ENs fail to return a valid response, then an -// error aggregating all failures is returned. -func (b *backendAccounts) getAccountFromAnyExeNode( - ctx context.Context, - address flow.Address, - blockID flow.Identifier, -) (*flow.Account, error) { - req := &execproto.GetAccountAtBlockIDRequest{ - Address: address.Bytes(), - BlockId: blockID[:], - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - return nil, rpc.ConvertError(err, "failed to find execution node to query", codes.Internal) - } - - var resp *execproto.GetAccountAtBlockIDResponse - errToReturn := b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - start := time.Now() - - resp, err = b.tryGetAccount(ctx, node, req) - duration := time.Since(start) - - lg := b.log.With(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Hex("address", req.GetAddress()). - Int64("rtt_ms", duration.Milliseconds()). - Logger() - - if err != nil { - lg.Err(err).Msg("failed to execute GetAccount") - return err - } - - // return if any execution node replied successfully - lg.Debug().Msg("Successfully got account info") - return nil - }, - nil, - ) - - if errToReturn != nil { - return nil, rpc.ConvertError(errToReturn, "failed to get account from the execution node", codes.Internal) - } - - account, err := convert.MessageToAccount(resp.GetAccount()) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to convert account message: %v", err) - } - - return account, nil -} - -// tryGetAccount attempts to get the account from the given execution node. -func (b *backendAccounts) tryGetAccount( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetAccountAtBlockIDRequest, -) (*execproto.GetAccountAtBlockIDResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - return execRPCClient.GetAccountAtBlockID(ctx, req) -} - -// compareAccountResults compares the result and error returned from local and remote getAccount calls -// and logs the results if they are different -func (b *backendAccounts) compareAccountResults( - execNodeResult *flow.Account, - execErr error, - localResult *flow.Account, - localErr error, - blockID flow.Identifier, - address flow.Address, -) { - if b.log.GetLevel() > zerolog.DebugLevel { - return - } - - lgCtx := b.log.With(). - Hex("block_id", blockID[:]). - Str("address", address.String()) - - // errors are different - if execErr != localErr { - lgCtx = lgCtx. - AnErr("execution_node_error", execErr). 
- AnErr("local_error", localErr) - - lg := lgCtx.Logger() - lg.Debug().Msg("errors from getting account on local and EN do not match") - return - } - - // both errors are nil, compare the accounts - if execErr == nil { - lgCtx, ok := compareAccountsLogger(execNodeResult, localResult, lgCtx) - if !ok { - lg := lgCtx.Logger() - lg.Debug().Msg("accounts from local and EN do not match") - } - } -} - -// compareAccountsLogger compares accounts produced by the execution node and local storage and -// return a logger configured to log the differences -func compareAccountsLogger(exec, local *flow.Account, lgCtx zerolog.Context) (zerolog.Context, bool) { - different := false - - if exec.Address != local.Address { - lgCtx = lgCtx. - Str("exec_node_address", exec.Address.String()). - Str("local_address", local.Address.String()) - different = true - } - - if exec.Balance != local.Balance { - lgCtx = lgCtx. - Uint64("exec_node_balance", exec.Balance). - Uint64("local_balance", local.Balance) - different = true - } - - contractListMatches := true - if len(exec.Contracts) != len(local.Contracts) { - lgCtx = lgCtx. - Int("exec_node_contract_count", len(exec.Contracts)). - Int("local_contract_count", len(local.Contracts)) - contractListMatches = false - different = true - } - - missingContracts := zerolog.Arr() - mismatchContracts := zerolog.Arr() - - for name, execContract := range exec.Contracts { - localContract, ok := local.Contracts[name] - - if !ok { - missingContracts.Str(name) - contractListMatches = false - different = true - } - - if !bytes.Equal(execContract, localContract) { - mismatchContracts.Str(name) - different = true - } - } - - lgCtx = lgCtx. - Array("missing_contracts", missingContracts). - Array("mismatch_contracts", mismatchContracts) - - // only check if there were any missing - if !contractListMatches { - extraContracts := zerolog.Arr() - for name := range local.Contracts { - if _, ok := exec.Contracts[name]; !ok { - extraContracts.Str(name) - different = true - } - } - lgCtx = lgCtx.Array("extra_contracts", extraContracts) - } - - if len(exec.Keys) != len(local.Keys) { - lgCtx = lgCtx. - Int("exec_node_key_count", len(exec.Keys)). 
- Int("local_key_count", len(local.Keys)) - different = true - } - - mismatchKeys := zerolog.Arr() - - for i, execKey := range exec.Keys { - localKey := local.Keys[i] - - if !execKey.PublicKey.Equals(localKey.PublicKey) { - mismatchKeys.Uint32(execKey.Index) - different = true - } - } - - lgCtx = lgCtx.Array("mismatch_keys", mismatchKeys) - - return lgCtx, !different -} - -// convertAccountError converts the script execution error to a gRPC error -func convertAccountError(err error, address flow.Address, height uint64) error { - if err == nil { - return nil - } - - if errors.Is(err, storage.ErrNotFound) { - return status.Errorf(codes.NotFound, "account with address %s not found: %v", address, err) - } - - if fvmerrors.IsAccountNotFoundError(err) { - return status.Errorf(codes.NotFound, "account not found") - } - - return rpc.ConvertIndexError(err, height, "failed to get account") -} diff --git a/engine/access/rpc/backend/backend_block_base.go b/engine/access/rpc/backend/backend_block_base.go new file mode 100644 index 00000000000..9ea0a9e0ce9 --- /dev/null +++ b/engine/access/rpc/backend/backend_block_base.go @@ -0,0 +1,48 @@ +package backend + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// backendBlockBase provides shared functionality for block status determination +type backendBlockBase struct { + blocks storage.Blocks + headers storage.Headers + state protocol.State +} + +// getBlockStatus returns the block status for a given header. +// +// No errors are expected during normal operations. +func (b *backendBlockBase) getBlockStatus(header *flow.Header) (flow.BlockStatus, error) { + // check which block is finalized at the target block's height + // note: this index is only populated for finalized blocks + blockIDFinalizedAtHeight, err := b.headers.BlockIDByHeight(header.Height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return flow.BlockStatusUnknown, nil // height not indexed yet (not finalized) + } + return flow.BlockStatusUnknown, fmt.Errorf("failed to lookup block ID by height: %w", err) + } + + if blockIDFinalizedAtHeight != header.ID() { + // The queried block has been orphaned. It will never be finalized or sealed. 
+ return flow.BlockStatusUnknown, nil + } + + sealed, err := b.state.Sealed().Head() + if err != nil { + return flow.BlockStatusUnknown, fmt.Errorf("failed to lookup sealed header: %w", err) + } + + if header.Height > sealed.Height { + return flow.BlockStatusFinalized, nil + } + + return flow.BlockStatusSealed, nil +} diff --git a/engine/access/rpc/backend/backend_block_details.go b/engine/access/rpc/backend/backend_block_details.go index c0a21f97b76..8cfb204aa44 100644 --- a/engine/access/rpc/backend/backend_block_details.go +++ b/engine/access/rpc/backend/backend_block_details.go @@ -6,49 +6,47 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" ) type backendBlockDetails struct { - blocks storage.Blocks - state protocol.State + backendBlockBase } func (b *backendBlockDetails) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) { var header *flow.Header + var blockStatus flow.BlockStatus var err error if isSealed { - // get the latest seal header from storage header, err = b.state.Sealed().Head() if err != nil { + // sealed header must exist in the db, otherwise the node's state may be corrupt err = irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } + blockStatus = flow.BlockStatusSealed } else { - // get the finalized header from state header, err = b.state.Final().Head() if err != nil { + // finalized header must exist in the db, otherwise the node's state may be corrupt err = irrecoverable.NewExceptionf("failed to lookup final header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - } - if err != nil { - // node should always have the latest block - - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - irrecoverable.Throw(ctx, err) - return nil, flow.BlockStatusUnknown, err + // Note: there is a corner case when requesting the latest finalized block before the + // consensus follower has progressed past the spork root block. In this case, the returned + // blockStatus will be finalized, however, the block is actually sealed. 
+ if header.Height == b.state.Params().SporkRootBlockHeight() { + blockStatus = flow.BlockStatusSealed + } else { + blockStatus = flow.BlockStatusFinalized + } } // since we are querying a finalized or sealed block, we can use the height index and save an ID computation @@ -57,11 +55,7 @@ func (b *backendBlockDetails) GetLatestBlock(ctx context.Context, isSealed bool) return nil, flow.BlockStatusUnknown, status.Errorf(codes.Internal, "could not get latest block: %v", err) } - stat, err := b.getBlockStatus(ctx, block) - if err != nil { - return nil, stat, err - } - return block, stat, nil + return block, blockStatus, nil } func (b *backendBlockDetails) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) { @@ -70,44 +64,28 @@ func (b *backendBlockDetails) GetBlockByID(ctx context.Context, id flow.Identifi return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) } - stat, err := b.getBlockStatus(ctx, block) + status, err := b.getBlockStatus(block.Header) if err != nil { - return nil, stat, err + // Any error returned is an indication of a bug or state corruption. We must not continue processing. + err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - return block, stat, nil + return block, status, nil } func (b *backendBlockDetails) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) { block, err := b.blocks.ByHeight(height) if err != nil { - return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) + return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), height, err)) } - stat, err := b.getBlockStatus(ctx, block) + status, err := b.getBlockStatus(block.Header) if err != nil { - return nil, stat, err - } - return block, stat, nil -} - -// No errors are expected during normal operations. -func (b *backendBlockDetails) getBlockStatus(ctx context.Context, block *flow.Block) (flow.BlockStatus, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + // Any error returned is an indication of a bug or state corruption. We must not continue processing.
+ err = irrecoverable.NewException(err) irrecoverable.Throw(ctx, err) - return flow.BlockStatusUnknown, err - } - - if block.Header.Height > sealed.Height { - return flow.BlockStatusFinalized, nil + return nil, flow.BlockStatusUnknown, err } - return flow.BlockStatusSealed, nil + return block, status, nil } diff --git a/engine/access/rpc/backend/backend_block_headers.go b/engine/access/rpc/backend/backend_block_headers.go index d77bdc57819..562bbe4cfca 100644 --- a/engine/access/rpc/backend/backend_block_headers.go +++ b/engine/access/rpc/backend/backend_block_headers.go @@ -3,54 +3,44 @@ package backend import ( "context" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" ) type backendBlockHeaders struct { - headers storage.Headers - state protocol.State + backendBlockBase } func (b *backendBlockHeaders) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) { - var header *flow.Header - var err error - if isSealed { - // get the latest seal header from storage - header, err = b.state.Sealed().Head() + header, err := b.state.Sealed().Head() if err != nil { + // sealed header must exist in the db, otherwise the node's state may be corrupt err = irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - } else { - // get the finalized header from state - header, err = b.state.Final().Head() - if err != nil { - err = irrecoverable.NewExceptionf("failed to lookup final header: %w", err) - } + return header, flow.BlockStatusSealed, nil } + header, err := b.state.Final().Head() if err != nil { - // node should always have the latest block - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. + // finalized header must exist in the db, otherwise the node's state may be corrupt + err = irrecoverable.NewExceptionf("failed to lookup final header: %w", err) irrecoverable.Throw(ctx, err) return nil, flow.BlockStatusUnknown, err } - stat, err := b.getBlockStatus(ctx, header) - if err != nil { - return nil, stat, err + // Note: there is a corner case when requesting the latest finalized block before the + // consensus follower has progressed past the spork root block. In this case, the returned + // blockStatus will be finalized, however, the block is actually sealed. 
+ if header.Height == b.state.Params().SporkRootBlockHeight() { + return header, flow.BlockStatusSealed, nil + } else { + return header, flow.BlockStatusFinalized, nil } - return header, stat, nil } func (b *backendBlockHeaders) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) { @@ -59,44 +49,28 @@ func (b *backendBlockHeaders) GetBlockHeaderByID(ctx context.Context, id flow.Id return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) } - stat, err := b.getBlockStatus(ctx, header) + status, err := b.getBlockStatus(header) if err != nil { - return nil, stat, err + // Any error returned is an indication of a bug or state corruption. We must not continue processing. + err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - return header, stat, nil + return header, status, nil } func (b *backendBlockHeaders) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) { header, err := b.headers.ByHeight(height) if err != nil { - return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), height, err)) + return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), height, err)) } - stat, err := b.getBlockStatus(ctx, header) + status, err := b.getBlockStatus(header) if err != nil { - return nil, stat, err - } - return header, stat, nil -} - -// No errors are expected during normal operations. -func (b *backendBlockHeaders) getBlockStatus(ctx context.Context, header *flow.Header) (flow.BlockStatus, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad State. - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + // Any error returned is an indication of a bug or state corruption. We must not continue processing.
+ err = irrecoverable.NewException(err) irrecoverable.Throw(ctx, err) - return flow.BlockStatusUnknown, err - } - - if header.Height > sealed.Height { - return flow.BlockStatusFinalized, nil + return nil, flow.BlockStatusUnknown, err } - return flow.BlockStatusSealed, nil + return header, status, nil } diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go deleted file mode 100644 index 56f3207e8b7..00000000000 --- a/engine/access/rpc/backend/backend_events.go +++ /dev/null @@ -1,436 +0,0 @@ -package backend - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "sort" - "time" - - "github.com/onflow/flow/protobuf/go/flow/entities" - - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/access/index" - "github.com/onflow/flow-go/engine/access/rpc/connection" - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/events" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/state_synchronization/indexer" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -type backendEvents struct { - headers storage.Headers - state protocol.State - chain flow.Chain - connFactory connection.ConnectionFactory - log zerolog.Logger - maxHeightRange uint - nodeCommunicator Communicator - queryMode IndexQueryMode - eventsIndex *index.EventsIndex - execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider -} - -// blockMetadata is used to capture information about requested blocks to avoid repeated blockID -// calculations and passing around full block headers. -type blockMetadata struct { - ID flow.Identifier - Height uint64 - Timestamp time.Time -} - -// GetEventsForHeightRange retrieves events for all sealed blocks between the start block height and -// the end block height (inclusive) that have the given type. -func (b *backendEvents) GetEventsForHeightRange( - ctx context.Context, - eventType string, - startHeight, endHeight uint64, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]flow.BlockEvents, error) { - - if endHeight < startHeight { - return nil, status.Error(codes.InvalidArgument, "start height must not be larger than end height") - } - - rangeSize := endHeight - startHeight + 1 // range is inclusive on both ends - if rangeSize > uint64(b.maxHeightRange) { - return nil, status.Errorf(codes.InvalidArgument, - "requested block range (%d) exceeded maximum (%d)", rangeSize, b.maxHeightRange) - } - - // get the latest sealed block header - sealed, err := b.state.Sealed().Head() - if err != nil { - // sealed block must be in the store, so throw an exception for any error - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return nil, err - } - - // start height should not be beyond the last sealed height - if startHeight > sealed.Height { - return nil, status.Errorf(codes.OutOfRange, - "start height %d is greater than the last sealed block height %d", startHeight, sealed.Height) - } - - // limit max height to last sealed block in the chain - // - // Note: this causes unintuitive behavior for clients making requests through a proxy that - // fronts multiple nodes. 
With that setup, clients may receive responses for a smaller range - // than requested because the node serving the request has a slightly delayed view of the chain. - // - // An alternative option is to return an error here, but that's likely to cause more pain for - // these clients since the requests would intermittently fail. it's recommended instead to - // check the block height of the last message in the response. this will be the last block - // height searched, and can be used to determine the start height for the next range. - if endHeight > sealed.Height { - endHeight = sealed.Height - } - - // find the block headers for all the blocks between min and max height (inclusive) - blockHeaders := make([]blockMetadata, 0, endHeight-startHeight+1) - - for i := startHeight; i <= endHeight; i++ { - // this looks inefficient, but is actually what's done under the covers by `headers.ByHeight` - // and avoids calculating header.ID() for each block. - blockID, err := b.headers.BlockIDByHeight(i) - if err != nil { - return nil, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), i, err)) - } - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %d: %w", i, err)) - } - - blockHeaders = append(blockHeaders, blockMetadata{ - ID: blockID, - Height: header.Height, - Timestamp: header.Timestamp, - }) - } - - return b.getBlockEvents(ctx, blockHeaders, eventType, requiredEventEncodingVersion) -} - -// GetEventsForBlockIDs retrieves events for all the specified block IDs that have the given type -func (b *backendEvents) GetEventsForBlockIDs( - ctx context.Context, - eventType string, - blockIDs []flow.Identifier, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]flow.BlockEvents, error) { - - if uint(len(blockIDs)) > b.maxHeightRange { - return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", len(blockIDs), b.maxHeightRange) - } - - // find the block headers for all the block IDs - blockHeaders := make([]blockMetadata, 0, len(blockIDs)) - for _, blockID := range blockIDs { - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %s: %w", blockID, err)) - } - - blockHeaders = append(blockHeaders, blockMetadata{ - ID: blockID, - Height: header.Height, - Timestamp: header.Timestamp, - }) - } - - return b.getBlockEvents(ctx, blockHeaders, eventType, requiredEventEncodingVersion) -} - -// getBlockEvents retrieves events for all the specified blocks that have the given type -// It gets all events available in storage, and requests the rest from an execution node. -func (b *backendEvents) getBlockEvents( - ctx context.Context, - blockInfos []blockMetadata, - eventType string, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]flow.BlockEvents, error) { - target := flow.EventType(eventType) - - if _, err := events.ValidateEvent(target, b.chain); err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid event type: %v", err) - } - - switch b.queryMode { - case IndexQueryModeExecutionNodesOnly: - return b.getBlockEventsFromExecutionNode(ctx, blockInfos, eventType, requiredEventEncodingVersion) - - case IndexQueryModeLocalOnly: - localResponse, missingBlocks, err := b.getBlockEventsFromStorage(ctx, blockInfos, target, requiredEventEncodingVersion) - if err != nil { - return nil, err - } - // all blocks should be available. 
- if len(missingBlocks) > 0 { - return nil, status.Errorf(codes.NotFound, "events not found in local storage for %d blocks", len(missingBlocks)) - } - return localResponse, nil - - case IndexQueryModeFailover: - localResponse, missingBlocks, err := b.getBlockEventsFromStorage(ctx, blockInfos, target, requiredEventEncodingVersion) - if err != nil { - // if there was an error, request all blocks from execution nodes - missingBlocks = blockInfos - b.log.Debug().Err(err).Msg("failed to get events from local storage") - } - - if len(missingBlocks) == 0 { - return localResponse, nil - } - - b.log.Debug(). - Int("missing_blocks", len(missingBlocks)). - Msg("querying execution nodes for events from missing blocks") - - enResponse, err := b.getBlockEventsFromExecutionNode(ctx, missingBlocks, eventType, requiredEventEncodingVersion) - if err != nil { - return nil, err - } - - // sort ascending by block height - // this is needed because some blocks may be retrieved from storage and others from execution nodes. - // most likely, the earlier blocks will all be found in local storage, but that's not guaranteed, - // especially for nodes started after a spork, or once pruning is enabled. - // Note: this may not match the order of the original request for clients using GetEventsForBlockIDs - // that provide out of order block IDs - response := append(localResponse, enResponse...) - sort.Slice(response, func(i, j int) bool { - return response[i].BlockHeight < response[j].BlockHeight - }) - return response, nil - - default: - return nil, status.Errorf(codes.Internal, "unknown event query mode: %v", b.queryMode) - } -} - -// getBlockEventsFromStorage retrieves events for all the specified blocks that have the given type -// from the local storage -func (b *backendEvents) getBlockEventsFromStorage( - ctx context.Context, - blockInfos []blockMetadata, - eventType flow.EventType, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]flow.BlockEvents, []blockMetadata, error) { - missing := make([]blockMetadata, 0) - resp := make([]flow.BlockEvents, 0) - - for _, blockInfo := range blockInfos { - if ctx.Err() != nil { - return nil, nil, rpc.ConvertError(ctx.Err(), "failed to get events from storage", codes.Canceled) - } - - events, err := b.eventsIndex.ByBlockID(blockInfo.ID, blockInfo.Height) - if err != nil { - if errors.Is(err, storage.ErrNotFound) || - errors.Is(err, storage.ErrHeightNotIndexed) || - errors.Is(err, indexer.ErrIndexNotInitialized) { - missing = append(missing, blockInfo) - continue - } - err = fmt.Errorf("failed to get events for block %s: %w", blockInfo.ID, err) - return nil, nil, rpc.ConvertError(err, "failed to get events from storage", codes.Internal) - } - - filteredEvents := make([]flow.Event, 0) - for _, e := range events { - if e.Type != eventType { - continue - } - - // events are encoded in CCF format in storage. 
convert to JSON-CDC if requested - if requiredEventEncodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { - payload, err := convert.CcfPayloadToJsonPayload(e.Payload) - if err != nil { - err = fmt.Errorf("failed to convert event payload for block %s: %w", blockInfo.ID, err) - return nil, nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) - } - e.Payload = payload - } - - filteredEvents = append(filteredEvents, e) - } - - resp = append(resp, flow.BlockEvents{ - BlockID: blockInfo.ID, - BlockHeight: blockInfo.Height, - BlockTimestamp: blockInfo.Timestamp, - Events: filteredEvents, - }) - } - - return resp, missing, nil -} - -// getBlockEventsFromExecutionNode retrieves events for all the specified blocks that have the given type -// from an execution node -func (b *backendEvents) getBlockEventsFromExecutionNode( - ctx context.Context, - blockInfos []blockMetadata, - eventType string, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]flow.BlockEvents, error) { - - // create an execution API request for events at block ID - blockIDs := make([]flow.Identifier, len(blockInfos)) - for i := range blockInfos { - blockIDs[i] = blockInfos[i].ID - } - - if len(blockIDs) == 0 { - return []flow.BlockEvents{}, nil - } - - req := &execproto.GetEventsForBlockIDsRequest{ - Type: eventType, - BlockIds: convert.IdentifiersToMessages(blockIDs), - } - - // choose the last block ID to find the list of execution nodes - lastBlockID := blockIDs[len(blockIDs)-1] - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - lastBlockID, - ) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve events from execution node", codes.Internal) - } - - var resp *execproto.GetEventsForBlockIDsResponse - var successfulNode *flow.IdentitySkeleton - resp, successfulNode, err = b.getEventsFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve events from execution nodes", codes.Internal) - } - b.log.Trace(). - Str("execution_id", successfulNode.String()). - Str("last_block_id", lastBlockID.String()). 
- Msg("successfully got events") - - // convert execution node api result to access node api result - results, err := verifyAndConvertToAccessEvents( - resp.GetResults(), - blockInfos, - resp.GetEventEncodingVersion(), - requiredEventEncodingVersion, - ) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to verify retrieved events from execution node: %v", err) - } - - return results, nil -} - -// verifyAndConvertToAccessEvents converts execution node api result to access node api result, -// and verifies that the results contains results from each block that was requested -func verifyAndConvertToAccessEvents( - execEvents []*execproto.GetEventsForBlockIDsResponse_Result, - requestedBlockInfos []blockMetadata, - from entities.EventEncodingVersion, - to entities.EventEncodingVersion, -) ([]flow.BlockEvents, error) { - if len(execEvents) != len(requestedBlockInfos) { - return nil, errors.New("number of results does not match number of blocks requested") - } - - requestedBlockInfoSet := map[string]blockMetadata{} - for _, header := range requestedBlockInfos { - requestedBlockInfoSet[header.ID.String()] = header - } - - results := make([]flow.BlockEvents, len(execEvents)) - - for i, result := range execEvents { - blockInfo, expected := requestedBlockInfoSet[hex.EncodeToString(result.GetBlockId())] - if !expected { - return nil, fmt.Errorf("unexpected blockID from exe node %x", result.GetBlockId()) - } - if result.GetBlockHeight() != blockInfo.Height { - return nil, fmt.Errorf("unexpected block height %d for block %x from exe node", - result.GetBlockHeight(), - result.GetBlockId()) - } - - events, err := convert.MessagesToEventsWithEncodingConversion(result.GetEvents(), from, to) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal events in event %d with encoding version %s: %w", - i, to.String(), err) - } - - results[i] = flow.BlockEvents{ - BlockID: blockInfo.ID, - BlockHeight: blockInfo.Height, - BlockTimestamp: blockInfo.Timestamp, - Events: events, - } - } - - return results, nil -} - -// getEventsFromAnyExeNode retrieves the given events from any EN in `execNodes`. -// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from -// other ENs are logged and swallowed. If all ENs fail to return a valid response, then an -// error aggregating all failures is returned. -func (b *backendEvents) getEventsFromAnyExeNode(ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetEventsForBlockIDsRequest) (*execproto.GetEventsForBlockIDsResponse, *flow.IdentitySkeleton, error) { - var resp *execproto.GetEventsForBlockIDsResponse - var execNode *flow.IdentitySkeleton - errToReturn := b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - start := time.Now() - resp, err = b.tryGetEvents(ctx, node, req) - duration := time.Since(start) - - logger := b.log.With(). - Str("execution_node", node.String()). - Str("event", req.GetType()). - Int("blocks", len(req.BlockIds)). - Int64("rtt_ms", duration.Milliseconds()). 
- Logger() - - if err == nil { - // return if any execution node replied successfully - logger.Debug().Msg("Successfully got events") - execNode = node - return nil - } - - logger.Err(err).Msg("failed to execute GetEvents") - return err - }, - nil, - ) - - return resp, execNode, errToReturn -} - -func (b *backendEvents) tryGetEvents(ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetEventsForBlockIDsRequest) (*execproto.GetEventsForBlockIDsResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - return execRPCClient.GetEventsForBlockIDs(ctx, req) -} diff --git a/engine/access/rpc/backend/backend_scripts.go b/engine/access/rpc/backend/backend_scripts.go deleted file mode 100644 index b0237447d46..00000000000 --- a/engine/access/rpc/backend/backend_scripts.go +++ /dev/null @@ -1,368 +0,0 @@ -package backend - -import ( - "context" - "crypto/md5" //nolint:gosec - "time" - - lru "github.com/hashicorp/golang-lru/v2" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/access/rpc/connection" - "github.com/onflow/flow-go/engine/common/rpc" - commonrpc "github.com/onflow/flow-go/engine/common/rpc" - fvmerrors "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/execution" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -// uniqueScriptLoggingTimeWindow is the duration for checking the uniqueness of scripts sent for execution -const uniqueScriptLoggingTimeWindow = 10 * time.Minute - -type backendScripts struct { - log zerolog.Logger - headers storage.Headers - state protocol.State - connFactory connection.ConnectionFactory - metrics module.BackendScriptsMetrics - loggedScripts *lru.Cache[[md5.Size]byte, time.Time] - nodeCommunicator Communicator - scriptExecutor execution.ScriptExecutor - scriptExecMode IndexQueryMode - execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider -} - -// scriptExecutionRequest encapsulates the data needed to execute a script to make it easier -// to pass around between the various methods involved in script execution -type scriptExecutionRequest struct { - blockID flow.Identifier - height uint64 - script []byte - arguments [][]byte - insecureScriptHash [md5.Size]byte -} - -func newScriptExecutionRequest(blockID flow.Identifier, height uint64, script []byte, arguments [][]byte) *scriptExecutionRequest { - return &scriptExecutionRequest{ - blockID: blockID, - height: height, - script: script, - arguments: arguments, - - // encode to MD5 as low compute/memory lookup key - // CAUTION: cryptographically insecure md5 is used here, but only to de-duplicate logs. - // *DO NOT* use this hash for any protocol-related or cryptographic functions. - insecureScriptHash: md5.Sum(script), //nolint:gosec - } -} - -// ExecuteScriptAtLatestBlock executes provided script at the latest sealed block. 
-func (b *backendScripts) ExecuteScriptAtLatestBlock( - ctx context.Context, - script []byte, - arguments [][]byte, -) ([]byte, error) { - latestHeader, err := b.state.Sealed().Head() - if err != nil { - // the latest sealed header MUST be available - err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - irrecoverable.Throw(ctx, err) - return nil, err - } - - return b.executeScript(ctx, newScriptExecutionRequest(latestHeader.ID(), latestHeader.Height, script, arguments)) -} - -// ExecuteScriptAtBlockID executes provided script at the provided block ID. -func (b *backendScripts) ExecuteScriptAtBlockID( - ctx context.Context, - blockID flow.Identifier, - script []byte, - arguments [][]byte, -) ([]byte, error) { - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - return b.executeScript(ctx, newScriptExecutionRequest(blockID, header.Height, script, arguments)) -} - -// ExecuteScriptAtBlockHeight executes provided script at the provided block height. -func (b *backendScripts) ExecuteScriptAtBlockHeight( - ctx context.Context, - blockHeight uint64, - script []byte, - arguments [][]byte, -) ([]byte, error) { - header, err := b.headers.ByHeight(blockHeight) - if err != nil { - return nil, rpc.ConvertStorageError(resolveHeightError(b.state.Params(), blockHeight, err)) - } - - return b.executeScript(ctx, newScriptExecutionRequest(header.ID(), blockHeight, script, arguments)) -} - -// executeScript executes the provided script using either the local execution state or the execution -// nodes depending on the node's configuration and the availability of the data. -func (b *backendScripts) executeScript( - ctx context.Context, - scriptRequest *scriptExecutionRequest, -) ([]byte, error) { - switch b.scriptExecMode { - case IndexQueryModeExecutionNodesOnly: - result, _, err := b.executeScriptOnAvailableExecutionNodes(ctx, scriptRequest) - return result, err - - case IndexQueryModeLocalOnly: - result, _, err := b.executeScriptLocally(ctx, scriptRequest) - return result, err - - case IndexQueryModeFailover: - localResult, localDuration, localErr := b.executeScriptLocally(ctx, scriptRequest) - if localErr == nil || isInvalidArgumentError(localErr) || status.Code(localErr) == codes.Canceled { - return localResult, localErr - } - // Note: scripts that timeout are retried on the execution nodes since ANs may have performance - // issues for some scripts. 
- execResult, execDuration, execErr := b.executeScriptOnAvailableExecutionNodes(ctx, scriptRequest) - - resultComparer := newScriptResultComparison(b.log, b.metrics, b.shouldLogScript, scriptRequest) - _ = resultComparer.compare( - newScriptResult(execResult, execDuration, execErr), - newScriptResult(localResult, localDuration, localErr), - ) - - return execResult, execErr - - case IndexQueryModeCompare: - execResult, execDuration, execErr := b.executeScriptOnAvailableExecutionNodes(ctx, scriptRequest) - // we can only compare the results if there were either no errors or a cadence error - // since we cannot distinguish the EN error as caused by the block being pruned or some other reason, - // which may produce a valid RN output but an error for the EN - if execErr != nil && !isInvalidArgumentError(execErr) { - return nil, execErr - } - localResult, localDuration, localErr := b.executeScriptLocally(ctx, scriptRequest) - - resultComparer := newScriptResultComparison(b.log, b.metrics, b.shouldLogScript, scriptRequest) - _ = resultComparer.compare( - newScriptResult(execResult, execDuration, execErr), - newScriptResult(localResult, localDuration, localErr), - ) - - // always return EN results - return execResult, execErr - - default: - return nil, status.Errorf(codes.Internal, "unknown script execution mode: %v", b.scriptExecMode) - } -} - -// executeScriptLocally executes the provided script using the local execution state. -func (b *backendScripts) executeScriptLocally( - ctx context.Context, - r *scriptExecutionRequest, -) ([]byte, time.Duration, error) { - execStartTime := time.Now() - - result, err := b.scriptExecutor.ExecuteAtBlockHeight(ctx, r.script, r.arguments, r.height) - - execEndTime := time.Now() - execDuration := execEndTime.Sub(execStartTime) - - lg := b.log.With(). - Str("script_executor_addr", "localhost"). - Hex("block_id", logging.ID(r.blockID)). - Uint64("height", r.height). - Hex("script_hash", r.insecureScriptHash[:]). - Dur("execution_dur_ms", execDuration). - Logger() - - if err != nil { - convertedErr := convertScriptExecutionError(err, r.height) - - switch status.Code(convertedErr) { - case codes.InvalidArgument, codes.Canceled, codes.DeadlineExceeded: - logEvent := lg.Debug().Err(err) - if b.shouldLogScript(execEndTime, r.insecureScriptHash) { - logEvent.Str("script", string(r.script)) - } - logEvent.Msg("script failed to execute locally") - - default: - lg.Debug().Err(err).Msg("script execution failed") - b.metrics.ScriptExecutionErrorLocal() - } - - return nil, execDuration, convertedErr - } - - if b.shouldLogScript(execEndTime, r.insecureScriptHash) { - lg.Debug(). - Str("script", string(r.script)). - Msg("Successfully executed script") - b.loggedScripts.Add(r.insecureScriptHash, execEndTime) - } - - // log execution time - b.metrics.ScriptExecuted(execDuration, len(r.script)) - - return result, execDuration, nil -} - -// executeScriptOnAvailableExecutionNodes executes the provided script using available execution nodes. -func (b *backendScripts) executeScriptOnAvailableExecutionNodes( - ctx context.Context, - r *scriptExecutionRequest, -) ([]byte, time.Duration, error) { - // find few execution nodes which have executed the block earlier and provided an execution receipt for it - executors, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID(ctx, r.blockID) - if err != nil { - return nil, 0, status.Errorf(codes.Internal, "failed to find script executors at blockId %v: %v", r.blockID.String(), err) - } - - lg := b.log.With(). 
- Hex("block_id", logging.ID(r.blockID)). - Hex("script_hash", r.insecureScriptHash[:]). - Logger() - - var result []byte - var executionTime time.Time - var execDuration time.Duration - errToReturn := b.nodeCommunicator.CallAvailableNode( - executors, - func(node *flow.IdentitySkeleton) error { - execStartTime := time.Now() - - result, err = b.tryExecuteScriptOnExecutionNode(ctx, node.Address, r) - - executionTime = time.Now() - execDuration = executionTime.Sub(execStartTime) - - if err != nil { - return err - } - - if b.shouldLogScript(executionTime, r.insecureScriptHash) { - lg.Debug(). - Str("script_executor_addr", node.Address). - Str("script", string(r.script)). - Dur("execution_dur_ms", execDuration). - Msg("Successfully executed script") - b.loggedScripts.Add(r.insecureScriptHash, executionTime) - } - - // log execution time - b.metrics.ScriptExecuted(time.Since(execStartTime), len(r.script)) - - return nil - }, - func(node *flow.IdentitySkeleton, err error) bool { - if status.Code(err) == codes.InvalidArgument { - logEvent := lg.Debug().Err(err).Str("script_executor_addr", node.Address) - if b.shouldLogScript(executionTime, r.insecureScriptHash) { - logEvent.Str("script", string(r.script)) - } - logEvent.Msg("script failed to execute on the execution node") - return true - } - return false - }, - ) - - if errToReturn != nil { - if status.Code(errToReturn) != codes.InvalidArgument { - b.metrics.ScriptExecutionErrorOnExecutionNode() - b.log.Error().Err(errToReturn).Msg("script execution failed for execution node internal reasons") - } - return nil, execDuration, rpc.ConvertError(errToReturn, "failed to execute script on execution nodes", codes.Internal) - } - - return result, execDuration, nil -} - -// tryExecuteScriptOnExecutionNode attempts to execute the script on the given execution node. 
-func (b *backendScripts) tryExecuteScriptOnExecutionNode( - ctx context.Context, - executorAddress string, - r *scriptExecutionRequest, -) ([]byte, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(executorAddress) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to create client for execution node %s: %v", - executorAddress, err) - } - defer closer.Close() - - execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &execproto.ExecuteScriptAtBlockIDRequest{ - BlockId: r.blockID[:], - Script: r.script, - Arguments: r.arguments, - }) - if err != nil { - return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", executorAddress, err) - } - return execResp.GetValue(), nil -} - -// isInvalidArgumentError checks if the error is from an invalid argument -func isInvalidArgumentError(scriptExecutionErr error) bool { - return status.Code(scriptExecutionErr) == codes.InvalidArgument -} - -// shouldLogScript checks if the script hash is unique in the time window -func (b *backendScripts) shouldLogScript(execTime time.Time, scriptHash [md5.Size]byte) bool { - if b.log.GetLevel() > zerolog.DebugLevel { - return false - } - timestamp, seen := b.loggedScripts.Get(scriptHash) - if seen { - return execTime.Sub(timestamp) >= uniqueScriptLoggingTimeWindow - } - return true -} - -// convertScriptExecutionError converts the script execution error to a gRPC error -func convertScriptExecutionError(err error, height uint64) error { - if err == nil { - return nil - } - - var failure fvmerrors.CodedFailure - if fvmerrors.As(err, &failure) { - return rpc.ConvertError(err, "failed to execute script", codes.Internal) - } - - // general FVM/ledger errors - var coded fvmerrors.CodedError - if fvmerrors.As(err, &coded) { - switch coded.Code() { - case fvmerrors.ErrCodeScriptExecutionCancelledError: - return status.Errorf(codes.Canceled, "script execution canceled: %v", err) - - case fvmerrors.ErrCodeScriptExecutionTimedOutError: - return status.Errorf(codes.DeadlineExceeded, "script execution timed out: %v", err) - - case fvmerrors.ErrCodeComputationLimitExceededError: - return status.Errorf(codes.ResourceExhausted, "script execution computation limit exceeded: %v", err) - - case fvmerrors.ErrCodeMemoryLimitExceededError: - return status.Errorf(codes.ResourceExhausted, "script execution memory limit exceeded: %v", err) - - default: - // runtime errors - return status.Errorf(codes.InvalidArgument, "failed to execute script: %v", err) - } - } - - return rpc.ConvertIndexError(err, height, "failed to execute script") -} diff --git a/engine/access/rpc/backend/backend_stream_blocks_test.go b/engine/access/rpc/backend/backend_stream_blocks_test.go index 296c82921f5..c4990435c20 100644 --- a/engine/access/rpc/backend/backend_stream_blocks_test.go +++ b/engine/access/rpc/backend/backend_stream_blocks_test.go @@ -15,6 +15,8 @@ import ( "google.golang.org/grpc/status" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/access/subscription/tracker" @@ -152,7 +154,7 @@ func (s *BackendBlocksSuite) backendParams() Params { Blocks: s.blocks, Headers: s.headers, ChainID: s.chainID, - MaxHeightRange: DefaultMaxHeightRange, + MaxHeightRange: 
events.DefaultMaxHeightRange, SnapshotHistoryLimit: DefaultSnapshotHistoryLimit, AccessMetrics: metrics.NewNoopCollector(), Log: s.log, @@ -163,7 +165,10 @@ func (s *BackendBlocksSuite) backendParams() Params { subscription.DefaultResponseLimit, subscription.DefaultSendBufferSize, ), - BlockTracker: s.blockTracker, + BlockTracker: s.blockTracker, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, } } diff --git a/engine/access/rpc/backend/backend_test.go b/engine/access/rpc/backend/backend_test.go index d8abac49b58..beded849522 100644 --- a/engine/access/rpc/backend/backend_test.go +++ b/engine/access/rpc/backend/backend_test.go @@ -25,7 +25,11 @@ import ( "github.com/onflow/flow-go/cmd/build" accessmock "github.com/onflow/flow-go/engine/access/mock" - backendmock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + communicatormock "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" "github.com/onflow/flow-go/engine/access/rpc/connection" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" @@ -87,7 +91,7 @@ type Suite struct { historicalAccessClient *accessmock.AccessAPIClient connectionFactory *connectionmock.ConnectionFactory - communicator *backendmock.Communicator + communicator *communicatormock.Communicator chainID flow.ChainID systemTx *flow.TransactionBody @@ -127,7 +131,7 @@ func (suite *Suite) SetupTest() { suite.historicalAccessClient = new(accessmock.AccessAPIClient) suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) - suite.communicator = new(backendmock.Communicator) + suite.communicator = new(communicatormock.Communicator) var err error suite.systemTx, err = blueprints.SystemChunkTransaction(flow.Testnet.Chain()) @@ -169,8 +173,6 @@ func (suite *Suite) TestGetLatestFinalizedBlockHeader() { block := unittest.BlockHeaderFixture() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Head").Return(block, nil).Once() - suite.state.On("Sealed").Return(suite.snapshot, nil) - suite.snapshot.On("Head").Return(block, nil).Once() params := suite.defaultBackendParams() @@ -179,13 +181,14 @@ func (suite *Suite) TestGetLatestFinalizedBlockHeader() { // query the handler for the latest finalized block header, stat, err := backend.GetLatestBlockHeader(context.Background(), false) - suite.checkResponse(header, err) + suite.Require().NoError(err) + suite.Require().NotNil(header) // make sure we got the latest block suite.Require().Equal(block.ID(), header.ID()) suite.Require().Equal(block.Height, header.Height) suite.Require().Equal(block.ParentID, header.ParentID) - suite.Require().Equal(stat, flow.BlockStatusSealed) + suite.Require().Equal(stat, flow.BlockStatusFinalized) suite.assertAllExpectations() } @@ -818,11 +821,11 @@ func (suite *Suite) TestGetLatestSealedBlockHeader() { suite.Run("GetLatestSealedBlockHeader - happy path", func() { block := unittest.BlockHeaderFixture() suite.snapshot.On("Head").Return(block, nil).Once() - suite.snapshot.On("Head").Return(block, nil).Once() // query the handler for the latest sealed 
block header, stat, err := backend.GetLatestBlockHeader(context.Background(), true) - suite.checkResponse(header, err) + suite.Require().NoError(err) + suite.Require().NotNil(header) // make sure we got the latest sealed block suite.Require().Equal(block.ID(), header.ID()) @@ -867,7 +870,8 @@ func (suite *Suite) TestGetTransaction() { suite.Require().NoError(err) actual, err := backend.GetTransaction(context.Background(), transaction.ID()) - suite.checkResponse(actual, err) + suite.Require().NoError(err) + suite.Require().NotNil(actual) suite.Require().Equal(expected, *actual) @@ -891,7 +895,8 @@ func (suite *Suite) TestGetCollection() { actual, err := backend.GetCollectionByID(context.Background(), expected.ID()) suite.transactions.AssertExpectations(suite.T()) - suite.checkResponse(actual, err) + suite.Require().NoError(err) + suite.Require().NotNil(actual) suite.Equal(expected, *actual) suite.assertAllExpectations() @@ -940,7 +945,8 @@ func (suite *Suite) TestGetTransactionResultByIndex() { suite.Run("TestGetTransactionResultByIndex - happy path", func() { suite.snapshot.On("Head").Return(block.Header, nil).Once() result, err := backend.GetTransactionResultByIndex(ctx, blockId, index, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(result.BlockHeight, block.Header.Height) suite.assertAllExpectations() @@ -1006,7 +1012,8 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { suite.snapshot.On("Head").Return(block.Header, nil).Once() result, err := backend.GetTransactionResultsByBlockID(ctx, blockId, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.assertAllExpectations() }) @@ -1098,7 +1105,8 @@ func (suite *Suite) TestTransactionStatusTransition() { // first call - when block under test is greater height than the sealed head, but execution node does not know about Tx result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be finalized since the sealed Blocks is smaller in height suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) @@ -1113,7 +1121,8 @@ func (suite *Suite) TestTransactionStatusTransition() { // second call - when block under test's height is greater height than the sealed head result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be executed since no `NotFound` error in the `GetTransactionResult` call suite.Assert().Equal(flow.TransactionStatusExecuted, result.Status) @@ -1123,7 +1132,8 @@ func (suite *Suite) TestTransactionStatusTransition() { // third call - when block under test's height is less than sealed head's height result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be sealed since the sealed Blocks is greater in height suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) @@ -1134,7 +1144,8 @@ func (suite *Suite) TestTransactionStatusTransition() 
{ // fourth call - when block under test's height so much less than the head's height that it's considered expired, // but since there is a execution result, means it should retain it's sealed status result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be expired since suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) @@ -1193,7 +1204,8 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { suite.Run("pending", func() { // referenced block isn't known yet, so should return pending status result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) @@ -1210,7 +1222,8 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { suite.Require().NoError(err) result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) @@ -1224,7 +1237,8 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry/2 result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) @@ -1235,7 +1249,8 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry + 1 result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusExpired, result.Status) }) }) @@ -1347,7 +1362,8 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { suite.Run("pending", func() { currentState = flow.TransactionStatusPending result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) // assert that no call to an execution node is made suite.execClient.AssertNotCalled(suite.T(), "GetTransactionResult", mock.Anything, mock.Anything) @@ -1358,7 +1374,8 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { suite.Run("finalized", func() { currentState = flow.TransactionStatusFinalized result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) }) @@ -1383,7 +1400,8 @@ func (suite *Suite) TestTransactionResultUnknown() { // first call - when block under 
test is greater height than the sealed head, but execution node does not know about Tx result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be reported as unknown suite.Assert().Equal(flow.TransactionStatusUnknown, result.Status) @@ -1407,21 +1425,14 @@ func (suite *Suite) TestGetLatestFinalizedBlock() { On("Head"). Return(header, nil).Once() - headerClone := *header - headerClone.Height = 0 - - suite.snapshot. - On("Head"). - Return(&headerClone, nil). - Once() - suite.blocks. On("ByHeight", header.Height). Return(&expected, nil) // query the handler for the latest finalized header actual, stat, err := backend.GetLatestBlock(context.Background(), false) - suite.checkResponse(actual, err) + suite.Require().NoError(err) + suite.Require().NotNil(actual) // make sure we got the latest header suite.Require().Equal(expected, *actual) @@ -1500,7 +1511,8 @@ func (suite *Suite) TestGetExecutionResultByID() { // execute request er, err := backend.GetExecutionResultByID(ctx, executionResult.ID()) - suite.checkResponse(er, err) + suite.Require().NoError(err) + suite.Require().NotNil(er) require.Equal(suite.T(), executionResult, er) }) @@ -1564,7 +1576,8 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { // execute request er, err := backend.GetExecutionResultForBlockID(ctx, blockID) - suite.checkResponse(er, err) + suite.Require().NoError(err) + suite.Require().NotNil(er) require.Equal(suite.T(), executionResult, er) }) @@ -1791,7 +1804,8 @@ func (suite *Suite) TestGetTransactionResultEventEncodingVersion() { Once() result, err := backend.GetTransactionResult(ctx, txId, blockId, flow.ZeroID, version) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) var expectedResult []flow.Event switch version { @@ -1855,7 +1869,8 @@ func (suite *Suite) TestGetTransactionResultByIndexAndBlockIdEventEncodingVersio Once() result, err := backend.GetTransactionResultByIndex(ctx, blockId, index, version) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) var expectedResult []flow.Event switch version { @@ -1886,7 +1901,8 @@ func (suite *Suite) TestGetTransactionResultByIndexAndBlockIdEventEncodingVersio Once() results, err := backend.GetTransactionResultsByBlockID(ctx, blockId, version) - suite.checkResponse(results, err) + suite.Require().NoError(err) + suite.Require().NotNil(results) var expectedResult []flow.Event switch version { @@ -1959,11 +1975,6 @@ func (suite *Suite) assertAllExpectations() { suite.execClient.AssertExpectations(suite.T()) } -func (suite *Suite) checkResponse(resp interface{}, err error) { - suite.Require().NoError(err) - suite.Require().NotNil(resp) -} - func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) { ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) receipt1 := unittest.ReceiptForBlockFixture(block) @@ -2017,13 +2028,15 @@ func (suite *Suite) defaultBackendParams() Params { ExecutionResults: suite.results, ChainID: suite.chainID, CollectionRPC: suite.colClient, - MaxHeightRange: DefaultMaxHeightRange, + MaxHeightRange: events.DefaultMaxHeightRange, SnapshotHistoryLimit: DefaultSnapshotHistoryLimit, - Communicator: NewNodeCommunicator(false), + Communicator: node_communicator.NewNodeCommunicator(false), AccessMetrics: 
metrics.NewNoopCollector(), Log: suite.log, BlockTracker: nil, - TxResultQueryMode: IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, LastFullBlockHeight: suite.lastFullBlockHeight, VersionControl: suite.versionControl, ExecNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( @@ -2036,9 +2049,9 @@ func (suite *Suite) defaultBackendParams() Params { } } -// TestResolveHeightError tests the resolveHeightError function for various scenarios where the block height +// TestResolveHeightError tests the ResolveHeightError function for various scenarios where the block height // is below the spork root height, below the node root height, above the node root height, or when a different -// error is provided. It validates that resolveHeightError returns an appropriate error message for each case. +// error is provided. It validates that ResolveHeightError returns an appropriate error message for each case. // // Test cases: // 1) If height is below the spork root height, it suggests using a historic node. @@ -2101,7 +2114,7 @@ func (suite *Suite) TestResolveHeightError() { stateParams.On("SealedRoot").Return(sealedRootHeader, nil).Once() } - err := resolveHeightError(stateParams, test.height, test.genericErr) + err := common.ResolveHeightError(stateParams, test.height, test.genericErr) if test.expectOriginalErr { suite.Assert().True(errors.Is(err, test.genericErr)) diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go deleted file mode 100644 index 1186d04057a..00000000000 --- a/engine/access/rpc/backend/backend_transactions.go +++ /dev/null @@ -1,1344 +0,0 @@ -package backend - -import ( - "context" - "errors" - "fmt" - "time" - - lru "github.com/hashicorp/golang-lru/v2" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access/validator" - "github.com/onflow/flow-go/engine/access/rpc/connection" - "github.com/onflow/flow-go/engine/common/rpc" - commonrpc "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - accessmodel "github.com/onflow/flow-go/model/access" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state" - "github.com/onflow/flow-go/storage" -) - -const DefaultFailedErrorMessage = "failed" - -type backendTransactions struct { - *TransactionsLocalDataProvider - staticCollectionRPC accessproto.AccessAPIClient // rpc client tied to a fixed collection node - transactions storage.Transactions - // NOTE: The transaction error message is currently only used by the access node and not by the observer node. - // To avoid introducing unnecessary command line arguments in the observer, one case could be that the error - // message cache is nil for the observer node. 
- txResultErrorMessages storage.TransactionResultErrorMessages - chainID flow.ChainID - transactionMetrics module.TransactionMetrics - transactionValidator *validator.TransactionValidator - retry *Retry - connFactory connection.ConnectionFactory - - previousAccessNodes []accessproto.AccessAPIClient - log zerolog.Logger - nodeCommunicator Communicator - txResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] - txResultQueryMode IndexQueryMode - - systemTxID flow.Identifier - systemTx *flow.TransactionBody - execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider -} - -var _ TransactionErrorMessage = (*backendTransactions)(nil) - -// SendTransaction forwards the transaction to the collection node -func (b *backendTransactions) SendTransaction( - ctx context.Context, - tx *flow.TransactionBody, -) error { - now := time.Now().UTC() - - err := b.transactionValidator.Validate(ctx, tx) - if err != nil { - return status.Errorf(codes.InvalidArgument, "invalid transaction: %s", err.Error()) - } - - // send the transaction to the collection node if valid - err = b.trySendTransaction(ctx, tx) - if err != nil { - b.transactionMetrics.TransactionSubmissionFailed() - return rpc.ConvertError(err, "failed to send transaction to a collection node", codes.Internal) - } - - b.transactionMetrics.TransactionReceived(tx.ID(), now) - - // store the transaction locally - err = b.transactions.Store(tx) - if err != nil { - return status.Errorf(codes.Internal, "failed to store transaction: %v", err) - } - - if b.retry.IsActive() { - go b.registerTransactionForRetry(tx) - } - - return nil -} - -// trySendTransaction tries to transaction to a collection node -func (b *backendTransactions) trySendTransaction(ctx context.Context, tx *flow.TransactionBody) error { - // if a collection node rpc client was provided at startup, just use that - if b.staticCollectionRPC != nil { - return b.grpcTxSend(ctx, b.staticCollectionRPC, tx) - } - - // otherwise choose all collection nodes to try - collNodes, err := b.chooseCollectionNodes(tx.ID()) - if err != nil { - return fmt.Errorf("failed to determine collection node for tx %x: %w", tx, err) - } - - var sendError error - logAnyError := func() { - if sendError != nil { - b.log.Info().Err(err).Msg("failed to send transactions to collector nodes") - } - } - defer logAnyError() - - // try sending the transaction to one of the chosen collection nodes - sendError = b.nodeCommunicator.CallAvailableNode( - collNodes, - func(node *flow.IdentitySkeleton) error { - err = b.sendTransactionToCollector(ctx, tx, node.Address) - if err != nil { - return err - } - return nil - }, - nil, - ) - - return sendError -} - -// chooseCollectionNodes finds a random subset of size sampleSize of collection node addresses from the -// collection node cluster responsible for the given tx -func (b *backendTransactions) chooseCollectionNodes(txID flow.Identifier) (flow.IdentitySkeletonList, error) { - // retrieve the set of collector clusters - currentEpoch, err := b.state.Final().Epochs().Current() - if err != nil { - return nil, fmt.Errorf("could not get current epoch: %w", err) - } - clusters, err := currentEpoch.Clustering() - if err != nil { - return nil, fmt.Errorf("could not cluster collection nodes: %w", err) - } - - // get the cluster responsible for the transaction - targetNodes, ok := clusters.ByTxID(txID) - if !ok { - return nil, fmt.Errorf("could not get local cluster by txID: %x", txID) - } - - return targetNodes, nil -} - -// sendTransactionToCollection sends the 
transaction to the given collection node via grpc -func (b *backendTransactions) sendTransactionToCollector( - ctx context.Context, - tx *flow.TransactionBody, - collectionNodeAddr string, -) error { - collectionRPC, closer, err := b.connFactory.GetAccessAPIClient(collectionNodeAddr, nil) - if err != nil { - return fmt.Errorf("failed to connect to collection node at %s: %w", collectionNodeAddr, err) - } - defer closer.Close() - - err = b.grpcTxSend(ctx, collectionRPC, tx) - if err != nil { - return fmt.Errorf("failed to send transaction to collection node at %s: %w", collectionNodeAddr, err) - } - return nil -} - -func (b *backendTransactions) grpcTxSend(ctx context.Context, client accessproto.AccessAPIClient, tx *flow.TransactionBody) error { - colReq := &accessproto.SendTransactionRequest{ - Transaction: convert.TransactionToMessage(*tx), - } - - clientDeadline := time.Now().Add(time.Duration(2) * time.Second) - ctx, cancel := context.WithDeadline(ctx, clientDeadline) - defer cancel() - - _, err := client.SendTransaction(ctx, colReq) - return err -} - -// SendRawTransaction sends a raw transaction to the collection node -func (b *backendTransactions) SendRawTransaction( - ctx context.Context, - tx *flow.TransactionBody, -) error { - // send the transaction to the collection node - return b.trySendTransaction(ctx, tx) -} - -func (b *backendTransactions) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.TransactionBody, error) { - // look up transaction from storage - tx, err := b.transactions.ByID(txID) - txErr := rpc.ConvertStorageError(err) - - if txErr != nil { - if status.Code(txErr) == codes.NotFound { - return b.getHistoricalTransaction(ctx, txID) - } - // Other Error trying to retrieve the transaction, return with err - return nil, txErr - } - - return tx, nil -} - -func (b *backendTransactions) GetTransactionsByBlockID( - _ context.Context, - blockID flow.Identifier, -) ([]*flow.TransactionBody, error) { - var transactions []*flow.TransactionBody - - // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - for _, guarantee := range block.Payload.Guarantees { - collection, err := b.collections.ByID(guarantee.CollectionID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - transactions = append(transactions, collection.Transactions...) - } - - transactions = append(transactions, b.systemTx) - - return transactions, nil -} - -func (b *backendTransactions) GetTransactionResult( - ctx context.Context, - txID flow.Identifier, - blockID flow.Identifier, - collectionID flow.Identifier, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - // look up transaction from storage - start := time.Now() - - tx, err := b.transactions.ByID(txID) - if err != nil { - txErr := rpc.ConvertStorageError(err) - - if status.Code(txErr) != codes.NotFound { - return nil, txErr - } - - // Tx not found. 
If we have historical Sporks setup, lets look through those as well - if b.txResultCache != nil { - val, ok := b.txResultCache.Get(txID) - if ok { - return val, nil - } - } - historicalTxResult, err := b.getHistoricalTransactionResult(ctx, txID) - if err != nil { - // if tx not found in old access nodes either, then assume that the tx was submitted to a different AN - // and return status as unknown - txStatus := flow.TransactionStatusUnknown - result := &accessmodel.TransactionResult{ - Status: txStatus, - StatusCode: uint(txStatus), - } - if b.txResultCache != nil { - b.txResultCache.Add(txID, result) - } - return result, nil - } - - if b.txResultCache != nil { - b.txResultCache.Add(txID, historicalTxResult) - } - return historicalTxResult, nil - } - - block, err := b.retrieveBlock(blockID, collectionID, txID) - // an error occurred looking up the block or the requested block or collection was not found. - // If looking up the block based solely on the txID returns not found, then no error is - // returned since the block may not be finalized yet. - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - var blockHeight uint64 - var txResult *accessmodel.TransactionResult - // access node may not have the block if it hasn't yet been finalized, hence block can be nil at this point - if block != nil { - txResult, err = b.lookupTransactionResult(ctx, txID, block.Header, requiredEventEncodingVersion) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result", codes.Internal) - } - - // an additional check to ensure the correctness of the collection ID. - expectedCollectionID, err := b.LookupCollectionIDInBlock(block, txID) - if err != nil { - // if the collection has not been indexed yet, the lookup will return a not found error. - // if the request included a blockID or collectionID in its the search criteria, not found - // should result in an error because it's not possible to guarantee that the result found - // is the correct one. - if blockID != flow.ZeroID || collectionID != flow.ZeroID { - return nil, rpc.ConvertStorageError(err) - } - } - - if collectionID == flow.ZeroID { - collectionID = expectedCollectionID - } else if collectionID != expectedCollectionID { - return nil, status.Error(codes.InvalidArgument, "transaction not found in provided collection") - } - - blockID = block.ID() - blockHeight = block.Header.Height - } - - // If there is still no transaction result, provide one based on available information. - if txResult == nil { - var txStatus flow.TransactionStatus - // Derive the status of the transaction. - if block == nil { - txStatus, err = b.DeriveUnknownTransactionStatus(tx.ReferenceBlockID) - } else { - txStatus, err = b.DeriveTransactionStatus(blockHeight, false) - } - - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - - txResult = &accessmodel.TransactionResult{ - BlockID: blockID, - BlockHeight: blockHeight, - TransactionID: txID, - Status: txStatus, - CollectionID: collectionID, - } - } else { - txResult.CollectionID = collectionID - } - - b.transactionMetrics.TransactionResultFetched(time.Since(start), len(tx.Script)) - - return txResult, nil -} - -// retrieveBlock function returns a block based on the input arguments. -// The block ID lookup has the highest priority, followed by the collection ID lookup. -// If both are missing, the default lookup by transaction ID is performed. 
-// -// If looking up the block based solely on the txID returns not found, then no error is returned. -// -// Expected errors: -// - storage.ErrNotFound if the requested block or collection was not found. -func (b *backendTransactions) retrieveBlock( - blockID flow.Identifier, - collectionID flow.Identifier, - txID flow.Identifier, -) (*flow.Block, error) { - if blockID != flow.ZeroID { - return b.blocks.ByID(blockID) - } - - if collectionID != flow.ZeroID { - return b.blocks.ByCollectionID(collectionID) - } - - // find the block for the transaction - block, err := b.lookupBlock(txID) - - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, err - } - - return block, nil -} - -func (b *backendTransactions) GetTransactionResultsByBlockID( - ctx context.Context, - blockID flow.Identifier, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]*accessmodel.TransactionResult, error) { - // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - switch b.txResultQueryMode { - case IndexQueryModeExecutionNodesOnly: - return b.getTransactionResultsByBlockIDFromExecutionNode(ctx, block, requiredEventEncodingVersion) - case IndexQueryModeLocalOnly: - return b.GetTransactionResultsByBlockIDFromStorage(ctx, block, requiredEventEncodingVersion) - case IndexQueryModeFailover: - results, err := b.GetTransactionResultsByBlockIDFromStorage(ctx, block, requiredEventEncodingVersion) - if err == nil { - return results, nil - } - - // If any error occurs with local storage - request transaction result from EN - return b.getTransactionResultsByBlockIDFromExecutionNode(ctx, block, requiredEventEncodingVersion) - default: - return nil, status.Errorf(codes.Internal, "unknown transaction result query mode: %v", b.txResultQueryMode) - } -} - -func (b *backendTransactions) getTransactionResultsByBlockIDFromExecutionNode( - ctx context.Context, - block *flow.Block, - requiredEventEncodingVersion entities.EventEncodingVersion, -) ([]*accessmodel.TransactionResult, error) { - blockID := block.ID() - req := &execproto.GetTransactionsByBlockIDRequest{ - BlockId: blockID[:], - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return nil, status.Error(codes.NotFound, err.Error()) - } - return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) - } - - resp, err := b.getTransactionResultsByBlockIDFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) - } - - results := make([]*accessmodel.TransactionResult, 0, len(resp.TransactionResults)) - i := 0 - errInsufficientResults := status.Errorf( - codes.Internal, - "number of transaction results returned by execution node is less than the number of transactions in the block", - ) - - for _, guarantee := range block.Payload.Guarantees { - collection, err := b.collections.LightByID(guarantee.CollectionID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - for _, txID := range collection.Transactions { - // bounds check. 
this means the EN returned fewer transaction results than the transactions in the block - if i >= len(resp.TransactionResults) { - return nil, errInsufficientResults - } - txResult := resp.TransactionResults[i] - - // tx body is irrelevant to status if it's in an executed block - txStatus, err := b.DeriveTransactionStatus(block.Header.Height, true) - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - events, err := convert.MessagesToEventsWithEncodingConversion(txResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) - if err != nil { - return nil, status.Errorf(codes.Internal, - "failed to convert events to message in txID %x: %v", txID, err) - } - - results = append(results, &accessmodel.TransactionResult{ - Status: txStatus, - StatusCode: uint(txResult.GetStatusCode()), - Events: events, - ErrorMessage: txResult.GetErrorMessage(), - BlockID: blockID, - TransactionID: txID, - CollectionID: guarantee.CollectionID, - BlockHeight: block.Header.Height, - }) - - i++ - } - } - - // after iterating through all transactions in each collection, i equals the total number of - // user transactions in the block - txCount := i - sporkRootBlockHeight := b.state.Params().SporkRootBlockHeight() - - // root block has no system transaction result - if block.Header.Height > sporkRootBlockHeight { - // system chunk transaction - - // resp.TransactionResults includes the system tx result, so there should be exactly one - // more result than txCount - if txCount != len(resp.TransactionResults)-1 { - if txCount >= len(resp.TransactionResults) { - return nil, errInsufficientResults - } - // otherwise there are extra results - // TODO(bft): slashable offense - return nil, status.Errorf(codes.Internal, "number of transaction results returned by execution node is more than the number of transactions in the block") - } - - systemTxResult := resp.TransactionResults[len(resp.TransactionResults)-1] - systemTxStatus, err := b.DeriveTransactionStatus(block.Header.Height, true) - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - - events, err := convert.MessagesToEventsWithEncodingConversion(systemTxResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) - if err != nil { - return nil, rpc.ConvertError(err, "failed to convert events from system tx result", codes.Internal) - } - - results = append(results, &accessmodel.TransactionResult{ - Status: systemTxStatus, - StatusCode: uint(systemTxResult.GetStatusCode()), - Events: events, - ErrorMessage: systemTxResult.GetErrorMessage(), - BlockID: blockID, - TransactionID: b.systemTxID, - BlockHeight: block.Header.Height, - }) - } - return results, nil -} - -// GetTransactionResultByIndex returns transactions Results for an index in a block that is executed, -// pending or finalized transactions return errors -func (b *backendTransactions) GetTransactionResultByIndex( - ctx context.Context, - blockID flow.Identifier, - index uint32, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - // TODO: https://github.com/onflow/flow-go/issues/2175 so caching doesn't cause a circular dependency - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - switch b.txResultQueryMode { - case 
IndexQueryModeExecutionNodesOnly: - return b.getTransactionResultByIndexFromExecutionNode(ctx, block, index, requiredEventEncodingVersion) - case IndexQueryModeLocalOnly: - return b.GetTransactionResultByIndexFromStorage(ctx, block, index, requiredEventEncodingVersion) - case IndexQueryModeFailover: - result, err := b.GetTransactionResultByIndexFromStorage(ctx, block, index, requiredEventEncodingVersion) - if err == nil { - return result, nil - } - - // If any error occurs with local storage - request transaction result from EN - return b.getTransactionResultByIndexFromExecutionNode(ctx, block, index, requiredEventEncodingVersion) - default: - return nil, status.Errorf(codes.Internal, "unknown transaction result query mode: %v", b.txResultQueryMode) - } -} - -func (b *backendTransactions) getTransactionResultByIndexFromExecutionNode( - ctx context.Context, - block *flow.Block, - index uint32, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - blockID := block.ID() - // create request and forward to EN - req := &execproto.GetTransactionByIndexRequest{ - BlockId: blockID[:], - Index: index, - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return nil, status.Error(codes.NotFound, err.Error()) - } - return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) - } - - resp, err := b.getTransactionResultByIndexFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) - } - - // tx body is irrelevant to status if it's in an executed block - txStatus, err := b.DeriveTransactionStatus(block.Header.Height, true) - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - - events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to convert events in blockID %x: %v", blockID, err) - } - - // convert to response, cache and return - return &accessmodel.TransactionResult{ - Status: txStatus, - StatusCode: uint(resp.GetStatusCode()), - Events: events, - ErrorMessage: resp.GetErrorMessage(), - BlockID: blockID, - BlockHeight: block.Header.Height, - }, nil -} - -// GetSystemTransaction returns system transaction -func (b *backendTransactions) GetSystemTransaction(ctx context.Context, _ flow.Identifier) (*flow.TransactionBody, error) { - return b.systemTx, nil -} - -// GetSystemTransactionResult returns system transaction result -func (b *backendTransactions) GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) { - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - return b.lookupTransactionResult(ctx, b.systemTxID, block.Header, requiredEventEncodingVersion) -} - -// Error returns: -// - `storage.ErrNotFound` - collection referenced by transaction or block by a collection has not been found. -// - all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal). 
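The `IndexQueryModeFailover` branches above all share the same local-first, execution-node-fallback shape. Below is a minimal, self-contained sketch of that pattern under simplified assumptions — the sentinel, function names, and string result are illustrative stand-ins, not the real flow-go storage or backend APIs:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotIndexed is a stand-in for a benign "not indexed locally yet" sentinel.
var errNotIndexed = errors.New("not indexed locally")

// queryWithFailover tries the local index first and, on any local failure,
// falls back to a remote lookup; only the remote error is surfaced to the caller.
func queryWithFailover(local, remote func() (string, error)) (string, error) {
	if result, err := local(); err == nil {
		return result, nil
	}
	result, err := remote()
	if err != nil {
		return "", fmt.Errorf("local and remote lookups both failed: %w", err)
	}
	return result, nil
}

func main() {
	local := func() (string, error) { return "", errNotIndexed }
	remote := func() (string, error) { return "result-from-execution-node", nil }

	res, err := queryWithFailover(local, remote)
	fmt.Println(res, err) // result-from-execution-node <nil>
}
```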
-func (b *backendTransactions) lookupBlock(txID flow.Identifier) (*flow.Block, error) { - collection, err := b.collections.LightByTransactionID(txID) - if err != nil { - return nil, err - } - - block, err := b.blocks.ByCollectionID(collection.ID()) - if err != nil { - return nil, err - } - - return block, nil -} - -func (b *backendTransactions) lookupTransactionResult( - ctx context.Context, - txID flow.Identifier, - block *flow.Header, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - var txResult *accessmodel.TransactionResult - var err error - switch b.txResultQueryMode { - case IndexQueryModeExecutionNodesOnly: - txResult, err = b.GetTransactionResultFromExecutionNode(ctx, block, txID, requiredEventEncodingVersion) - case IndexQueryModeLocalOnly: - txResult, err = b.GetTransactionResultFromStorage(ctx, block, txID, requiredEventEncodingVersion) - case IndexQueryModeFailover: - txResult, err = b.GetTransactionResultFromStorage(ctx, block, txID, requiredEventEncodingVersion) - if err != nil { - // If any error occurs with local storage - request transaction result from EN - txResult, err = b.GetTransactionResultFromExecutionNode(ctx, block, txID, requiredEventEncodingVersion) - } - default: - return nil, status.Errorf(codes.Internal, "unknown transaction result query mode: %v", b.txResultQueryMode) - } - - if err != nil { - // if either the storage or execution node reported no results or there were not enough execution results - if status.Code(err) == codes.NotFound { - // No result yet, indicate that it has not been executed - return nil, nil - } - // Other Error trying to retrieve the result, return with err - return nil, err - } - - // considered executed as long as some result is returned, even if it's an error message - return txResult, nil -} - -func (b *backendTransactions) getHistoricalTransaction( - ctx context.Context, - txID flow.Identifier, -) (*flow.TransactionBody, error) { - for _, historicalNode := range b.previousAccessNodes { - txResp, err := historicalNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: txID[:]}) - if err == nil { - tx, err := convert.MessageToTransaction(txResp.Transaction, b.chainID.Chain()) - if err != nil { - return nil, status.Errorf(codes.Internal, "could not convert transaction: %v", err) - } - - // Found on a historical node. Report - return &tx, nil - } - // Otherwise, if not found, just continue - if status.Code(err) == codes.NotFound { - continue - } - // TODO should we do something if the error isn't not found? - } - return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID) -} - -func (b *backendTransactions) getHistoricalTransactionResult( - ctx context.Context, - txID flow.Identifier, -) (*accessmodel.TransactionResult, error) { - for _, historicalNode := range b.previousAccessNodes { - result, err := historicalNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{Id: txID[:]}) - if err == nil { - // Found on a historical node. Report - if result.GetStatus() == entities.TransactionStatus_UNKNOWN { - // We've moved to returning Status UNKNOWN instead of an error with the NotFound status, - // Therefore we should continue and look at the next access node for answers. - continue - } - - if result.GetStatus() == entities.TransactionStatus_PENDING { - // This is on a historical node. 
No transactions from it will ever be - // executed, therefore we should consider this expired - result.Status = entities.TransactionStatus_EXPIRED - } - - return convert.MessageToTransactionResult(result), nil - } - // Otherwise, if not found, just continue - if status.Code(err) == codes.NotFound { - continue - } - // TODO should we do something if the error isn't not found? - } - return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID) -} - -func (b *backendTransactions) registerTransactionForRetry(tx *flow.TransactionBody) { - referenceBlock, err := b.state.AtBlockID(tx.ReferenceBlockID).Head() - if err != nil { - return - } - - b.retry.RegisterTransaction(referenceBlock.Height, tx) -} - -func (b *backendTransactions) GetTransactionResultFromExecutionNode( - ctx context.Context, - block *flow.Header, - transactionID flow.Identifier, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - blockID := block.ID() - // create an execution API request for events at blockID and transactionID - req := &execproto.GetTransactionResultRequest{ - BlockId: blockID[:], - TransactionId: transactionID[:], - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - // if no execution receipt were found, return a NotFound GRPC error - if IsInsufficientExecutionReceipts(err) { - return nil, status.Error(codes.NotFound, err.Error()) - } - return nil, err - } - - resp, err := b.getTransactionResultFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, err - } - - // tx body is irrelevant to status if it's in an executed block - txStatus, err := b.DeriveTransactionStatus(block.Height, true) - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - - events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) - if err != nil { - return nil, rpc.ConvertError(err, "failed to convert events to message", codes.Internal) - } - - return &accessmodel.TransactionResult{ - TransactionID: transactionID, - Status: txStatus, - StatusCode: uint(resp.GetStatusCode()), - Events: events, - ErrorMessage: resp.GetErrorMessage(), - BlockID: blockID, - BlockHeight: block.Height, - }, nil -} - -// ATTENTION: might be a source of problems in future. We run this code on finalization gorotuine, -// potentially lagging finalization events if operations take long time. -// We might need to move this logic on dedicated goroutine and provide a way to skip finalization events if they are delivered -// too often for this engine. An example of similar approach - https://github.com/onflow/flow-go/blob/10b0fcbf7e2031674c00f3cdd280f27bd1b16c47/engine/common/follower/compliance_engine.go#L201.. -// No errors expected during normal operations. 
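The historical-node lookups above treat a gRPC `codes.NotFound` as benign in this context — the next previous-spork access node is simply tried — and only give up after the whole list is exhausted. A hedged sketch of that pattern follows; the lookup closures are stand-ins for the historical access clients, not the actual flow-go types:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// lookupFromNodes asks each lookup function (a stand-in for a historical access
// node client) in turn. codes.NotFound is benign here — the next node is tried —
// and a final NotFound is returned only after every node has been asked.
func lookupFromNodes(lookups []func() (string, error)) (string, error) {
	for _, lookup := range lookups {
		res, err := lookup()
		if err == nil {
			return res, nil
		}
		if status.Code(err) == codes.NotFound {
			continue // benign in this context: this node simply doesn't know the tx
		}
		// Other errors are skipped here for brevity; a real caller would log them.
	}
	return "", status.Error(codes.NotFound, "no known transaction on any historical node")
}

func main() {
	nodes := []func() (string, error){
		func() (string, error) { return "", status.Error(codes.NotFound, "unknown tx") },
		func() (string, error) { return "EXECUTED", nil },
	}
	res, err := lookupFromNodes(nodes)
	fmt.Println(res, err) // EXECUTED <nil>
}
```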
-func (b *backendTransactions) ProcessFinalizedBlockHeight(height uint64) error { - return b.retry.Retry(height) -} - -func (b *backendTransactions) getTransactionResultFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionResultRequest, -) (*execproto.GetTransactionResultResponse, error) { - var errToReturn error - - defer func() { - if errToReturn != nil { - b.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") - } - }() - - var resp *execproto.GetTransactionResultResponse - errToReturn = b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - resp, err = b.tryGetTransactionResult(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Hex("transaction_id", req.GetTransactionId()). - Msg("Successfully got transaction results from any node") - return nil - } - return err - }, - nil, - ) - - return resp, errToReturn -} - -func (b *backendTransactions) tryGetTransactionResult( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionResultRequest, -) (*execproto.GetTransactionResultResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResult(ctx, req) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (b *backendTransactions) getTransactionResultsByBlockIDFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionsByBlockIDRequest, -) (*execproto.GetTransactionResultsResponse, error) { - var errToReturn error - - defer func() { - // log the errors - if errToReturn != nil { - b.log.Err(errToReturn).Msg("failed to get transaction results from execution nodes") - } - }() - - // if we were passed 0 execution nodes add a specific error - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes") - } - - var resp *execproto.GetTransactionResultsResponse - errToReturn = b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - resp, err = b.tryGetTransactionResultsByBlockID(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). 
- Msg("Successfully got transaction results from any node") - return nil - } - return err - }, - nil, - ) - - return resp, errToReturn -} - -func (b *backendTransactions) tryGetTransactionResultsByBlockID( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionsByBlockIDRequest, -) (*execproto.GetTransactionResultsResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, req) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (b *backendTransactions) getTransactionResultByIndexFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionByIndexRequest, -) (*execproto.GetTransactionResultResponse, error) { - var errToReturn error - defer func() { - if errToReturn != nil { - b.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") - } - }() - - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes provided") - } - - var resp *execproto.GetTransactionResultResponse - errToReturn = b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - resp, err = b.tryGetTransactionResultByIndex(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Uint32("index", req.GetIndex()). - Msg("Successfully got transaction results from any node") - return nil - } - return err - }, - nil, - ) - - return resp, errToReturn -} - -func (b *backendTransactions) tryGetTransactionResultByIndex( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionByIndexRequest, -) (*execproto.GetTransactionResultResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResultByIndex(ctx, req) - if err != nil { - return nil, err - } - - return resp, nil -} - -// LookupErrorMessageByTransactionID returns transaction error message for specified transaction. -// If transaction error messages are stored locally, they will be checked first in local storage. -// If error messages are not stored locally, an RPC call will be made to the EN to fetch message. -// -// Expected errors during normal operation: -// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. -// - status.Error - remote GRPC call to EN has failed. 
-func (b *backendTransactions) LookupErrorMessageByTransactionID( - ctx context.Context, - blockID flow.Identifier, - height uint64, - transactionID flow.Identifier, -) (string, error) { - if b.txResultErrorMessages != nil { - res, err := b.txResultErrorMessages.ByBlockIDTransactionID(blockID, transactionID) - if err == nil { - return res.ErrorMessage, nil - } - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return "", status.Error(codes.NotFound, err.Error()) - } - return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) - } - req := &execproto.GetTransactionErrorMessageRequest{ - BlockId: convert.IdentifierToMessage(blockID), - TransactionId: convert.IdentifierToMessage(transactionID), - } - - resp, err := b.getTransactionErrorMessageFromAnyEN(ctx, execNodes, req) - if err != nil { - // If no execution nodes return a valid response, - // return a static message "failed". - txResult, err := b.txResultsIndex.ByBlockIDTransactionID(blockID, height, transactionID) - if err != nil { - return "", rpc.ConvertStorageError(err) - } - - if txResult.Failed { - return DefaultFailedErrorMessage, nil - } - - // in case tx result is not failed - return "", nil - } - - return resp.ErrorMessage, nil -} - -// LookupErrorMessageByIndex returns the transaction error message for a specified transaction using its index. -// If transaction error messages are stored locally, they will be checked first in local storage. -// If error messages are not stored locally, an RPC call will be made to the EN to fetch message. -// -// Expected errors during normal operation: -// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. -// - status.Error - remote GRPC call to EN has failed. -func (b *backendTransactions) LookupErrorMessageByIndex( - ctx context.Context, - blockID flow.Identifier, - height uint64, - index uint32, -) (string, error) { - if b.txResultErrorMessages != nil { - res, err := b.txResultErrorMessages.ByBlockIDTransactionIndex(blockID, index) - if err == nil { - return res.ErrorMessage, nil - } - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return "", status.Error(codes.NotFound, err.Error()) - } - return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) - } - req := &execproto.GetTransactionErrorMessageByIndexRequest{ - BlockId: convert.IdentifierToMessage(blockID), - Index: index, - } - - resp, err := b.getTransactionErrorMessageByIndexFromAnyEN(ctx, execNodes, req) - if err != nil { - // If no execution nodes return a valid response, - // return a static message "failed" - txResult, err := b.txResultsIndex.ByBlockIDTransactionIndex(blockID, height, index) - if err != nil { - return "", rpc.ConvertStorageError(err) - } - - if txResult.Failed { - return DefaultFailedErrorMessage, nil - } - - // in case tx result is not failed - return "", nil - } - - return resp.ErrorMessage, nil -} - -// LookupErrorMessagesByBlockID returns all error messages for failed transactions by blockID. -// If transaction error messages are stored locally, they will be checked first in local storage. -// If error messages are not stored locally, an RPC call will be made to the EN to fetch messages. 
-// -// Expected errors during normal operation: -// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. -// - status.Error - remote GRPC call to EN has failed. -func (b *backendTransactions) LookupErrorMessagesByBlockID( - ctx context.Context, - blockID flow.Identifier, - height uint64, -) (map[flow.Identifier]string, error) { - result := make(map[flow.Identifier]string) - - if b.txResultErrorMessages != nil { - res, err := b.txResultErrorMessages.ByBlockID(blockID) - if err == nil { - for _, value := range res { - result[value.TransactionID] = value.ErrorMessage - } - - return result, nil - } - } - - execNodes, err := b.execNodeIdentitiesProvider.ExecutionNodesForBlockID( - ctx, - blockID, - ) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return nil, status.Error(codes.NotFound, err.Error()) - } - return nil, rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) - } - req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ - BlockId: convert.IdentifierToMessage(blockID), - } - - resp, _, err := b.GetTransactionErrorMessagesFromAnyEN(ctx, execNodes, req) - if err != nil { - // If no execution nodes return a valid response, - // return a static message "failed" - txResults, err := b.txResultsIndex.ByBlockID(blockID, height) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - for _, txResult := range txResults { - if txResult.Failed { - result[txResult.TransactionID] = DefaultFailedErrorMessage - } - } - - return result, nil - } - - for _, value := range resp { - result[convert.MessageToIdentifier(value.TransactionId)] = value.ErrorMessage - } - - return result, nil -} - -// getTransactionErrorMessageFromAnyEN performs an RPC call using available nodes passed as argument. List of nodes must be non-empty otherwise an error will be returned. -// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -func (b *backendTransactions) getTransactionErrorMessageFromAnyEN( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionErrorMessageRequest, -) (*execproto.GetTransactionErrorMessageResponse, error) { - // if we were passed 0 execution nodes add a specific error - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes") - } - - var resp *execproto.GetTransactionErrorMessageResponse - errToReturn := b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - resp, err = b.tryGetTransactionErrorMessageFromEN(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Hex("transaction_id", req.GetTransactionId()). - Msg("Successfully got transaction error message from any node") - return nil - } - return err - }, - nil, - ) - - // log the errors - if errToReturn != nil { - b.log.Err(errToReturn).Msg("failed to get transaction error message from execution nodes") - return nil, errToReturn - } - - return resp, nil -} - -// getTransactionErrorMessageFromAnyEN performs an RPC call using available nodes passed as argument. List of nodes must be non-empty otherwise an error will be returned. 
-// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -func (b *backendTransactions) getTransactionErrorMessageByIndexFromAnyEN( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionErrorMessageByIndexRequest, -) (*execproto.GetTransactionErrorMessageResponse, error) { - // if we were passed 0 execution nodes add a specific error - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes") - } - - var resp *execproto.GetTransactionErrorMessageResponse - errToReturn := b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - resp, err = b.tryGetTransactionErrorMessageByIndexFromEN(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Uint32("index", req.GetIndex()). - Msg("Successfully got transaction error message by index from any node") - return nil - } - return err - }, - nil, - ) - if errToReturn != nil { - b.log.Err(errToReturn).Msg("failed to get transaction error message by index from execution nodes") - return nil, errToReturn - } - - return resp, nil -} - -// GetTransactionErrorMessagesFromAnyEN performs an RPC call using available nodes passed as argument. List of nodes must be non-empty otherwise an error will be returned. -// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -func (b *backendTransactions) GetTransactionErrorMessagesFromAnyEN( - ctx context.Context, - execNodes flow.IdentitySkeletonList, - req *execproto.GetTransactionErrorMessagesByBlockIDRequest, -) ([]*execproto.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) { - // if we were passed 0 execution nodes add a specific error - if len(execNodes) == 0 { - return nil, nil, errors.New("zero execution nodes") - } - - var resp *execproto.GetTransactionErrorMessagesResponse - var execNode *flow.IdentitySkeleton - - errToReturn := b.nodeCommunicator.CallAvailableNode( - execNodes, - func(node *flow.IdentitySkeleton) error { - var err error - execNode = node - resp, err = b.tryGetTransactionErrorMessagesByBlockIDFromEN(ctx, node, req) - if err == nil { - b.log.Debug(). - Str("execution_node", node.String()). - Hex("block_id", req.GetBlockId()). - Msg("Successfully got transaction error messages from any node") - return nil - } - return err - }, - nil, - ) - - // log the errors - if errToReturn != nil { - b.log.Err(errToReturn).Msg("failed to get transaction error messages from execution nodes") - return nil, nil, errToReturn - } - - return resp.GetResults(), execNode, nil -} - -// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -// -// tryGetTransactionErrorMessageFromEN performs a grpc call to the specified execution node and returns response. 
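The `LookupErrorMessage*` methods layer three sources: the local `txResultErrorMessages` store, then the execution nodes, and finally the static `DefaultFailedErrorMessage` placeholder when the transaction is known to have failed. A minimal sketch of that fallback chain is shown below, with plain function values standing in for the storage and execution-node clients (not the real flow-go interfaces):

```go
package main

import (
	"errors"
	"fmt"
)

const defaultFailedErrorMessage = "failed"

// errorMessageWithFallback sketches the layered lookup: local storage first,
// then a remote call, and finally a static placeholder when the transaction is
// known to have failed but neither source can provide a message.
func errorMessageWithFallback(local, remote func() (string, error), txFailed bool) (string, error) {
	if msg, err := local(); err == nil {
		return msg, nil
	}
	if msg, err := remote(); err == nil {
		return msg, nil
	}
	if txFailed {
		return defaultFailedErrorMessage, nil
	}
	return "", nil // the transaction did not fail, so there is no error message
}

func main() {
	local := func() (string, error) { return "", errors.New("not stored locally") }
	remote := func() (string, error) { return "", errors.New("all execution nodes failed") }

	msg, err := errorMessageWithFallback(local, remote, true)
	fmt.Println(msg, err) // failed <nil>
}
```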
-func (b *backendTransactions) tryGetTransactionErrorMessageFromEN( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionErrorMessageRequest, -) (*execproto.GetTransactionErrorMessageResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - return execRPCClient.GetTransactionErrorMessage(ctx, req) -} - -// tryGetTransactionErrorMessageByIndexFromEN performs a grpc call to the specified execution node and returns response. -// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -func (b *backendTransactions) tryGetTransactionErrorMessageByIndexFromEN( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionErrorMessageByIndexRequest, -) (*execproto.GetTransactionErrorMessageResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - return execRPCClient.GetTransactionErrorMessageByIndex(ctx, req) -} - -// tryGetTransactionErrorMessagesByBlockIDFromEN performs a grpc call to the specified execution node and returns response. -// Expected errors during normal operation: -// - status.Error - GRPC call failed, some of possible codes are: -// - codes.NotFound - request cannot be served by EN because of absence of data. -// - codes.Unavailable - remote node is not unavailable. -func (b *backendTransactions) tryGetTransactionErrorMessagesByBlockIDFromEN( - ctx context.Context, - execNode *flow.IdentitySkeleton, - req *execproto.GetTransactionErrorMessagesByBlockIDRequest, -) (*execproto.GetTransactionErrorMessagesResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - return execRPCClient.GetTransactionErrorMessagesByBlockID(ctx, req) -} diff --git a/engine/access/rpc/backend/backend_transactions_test.go b/engine/access/rpc/backend/backend_transactions_test.go deleted file mode 100644 index 6005ee74aaf..00000000000 --- a/engine/access/rpc/backend/backend_transactions_test.go +++ /dev/null @@ -1,1545 +0,0 @@ -package backend - -import ( - "context" - "fmt" - "math/rand" - - "github.com/dgraph-io/badger/v2" - jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/access/index" - connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/fvm/blueprints" - accessmodel "github.com/onflow/flow-go/model/access" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" - "github.com/onflow/flow-go/state/protocol" - bprotocol "github.com/onflow/flow-go/state/protocol/badger" - "github.com/onflow/flow-go/state/protocol/util" - "github.com/onflow/flow-go/storage" - 
"github.com/onflow/flow-go/utils/unittest" - "github.com/onflow/flow-go/utils/unittest/generator" - "github.com/onflow/flow-go/utils/unittest/mocks" -) - -const expectedErrorMsg = "expected test error" - -func (suite *Suite) withPreConfiguredState(f func(snap protocol.Snapshot)) { - identities := unittest.CompleteIdentitySet() - rootSnapshot := unittest.RootSnapshotFixture(identities) - util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { - epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) - - epochBuilder. - BuildEpoch(). - CompleteEpoch() - - // get heights of each phase in built epochs - epoch1, ok := epochBuilder.EpochHeights(1) - require.True(suite.T(), ok) - - // setup AtHeight mock returns for State - for _, height := range epoch1.Range() { - suite.state.On("AtHeight", epoch1.Range()).Return(state.AtHeight(height)) - } - - snap := state.AtHeight(epoch1.FinalHeight()) - suite.state.On("Final").Return(snap) - suite.communicator.On("CallAvailableNode", - mock.Anything, - mock.Anything, - mock.Anything). - Return(nil).Once() - - f(snap) - }) -} - -// TestGetTransactionResultReturnsUnknown returns unknown result when tx not found -func (suite *Suite) TestGetTransactionResultReturnsUnknown() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - block := unittest.BlockFixture() - tbody := unittest.TransactionBodyFixture() - tx := unittest.TransactionFixture() - tx.TransactionBody = tbody - - coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) - suite.state.On("AtBlockID", block.ID()).Return(snap, nil).Once() - - suite.transactions. - On("ByID", tx.ID()). - Return(nil, storage.ErrNotFound) - - params := suite.defaultBackendParams() - params.Communicator = suite.communicator - - backend, err := New(params) - suite.Require().NoError(err) - - res, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(res.Status, flow.TransactionStatusUnknown) - }) -} - -// TestGetTransactionResultReturnsTransactionError returns error from transaction storage -func (suite *Suite) TestGetTransactionResultReturnsTransactionError() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - block := unittest.BlockFixture() - tbody := unittest.TransactionBodyFixture() - tx := unittest.TransactionFixture() - tx.TransactionBody = tbody - - coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) - - suite.transactions. - On("ByID", tx.ID()). - Return(nil, fmt.Errorf("some other error")) - - suite.blocks. - On("ByID", block.ID()). - Return(&block, nil). 
- Once() - - suite.state.On("AtBlockID", block.ID()).Return(snap, nil).Once() - - params := suite.defaultBackendParams() - params.Communicator = suite.communicator - - backend, err := New(params) - suite.Require().NoError(err) - - _, err = backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to find: %v", fmt.Errorf("some other error"))) - }) -} - -// TestGetTransactionResultReturnsValidTransactionResultFromHistoricNode tests lookup in historic nodes -func (suite *Suite) TestGetTransactionResultReturnsValidTransactionResultFromHistoricNode() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - block := unittest.BlockFixture() - tbody := unittest.TransactionBodyFixture() - tx := unittest.TransactionFixture() - tx.TransactionBody = tbody - - coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) - - suite.transactions. - On("ByID", tx.ID()). - Return(nil, storage.ErrNotFound) - - suite.state.On("AtBlockID", block.ID()).Return(snap, nil).Once() - - transactionResultResponse := access.TransactionResultResponse{ - Status: entities.TransactionStatus_EXECUTED, - StatusCode: uint32(entities.TransactionStatus_EXECUTED), - } - - suite.historicalAccessClient. - On("GetTransactionResult", mock.Anything, mock.Anything). - Return(&transactionResultResponse, nil).Once() - - params := suite.defaultBackendParams() - params.HistoricalAccessNodes = []access.AccessAPIClient{suite.historicalAccessClient} - params.Communicator = suite.communicator - - backend, err := New(params) - suite.Require().NoError(err) - - resp, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status) - suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode) - }) -} - -func (suite *Suite) withGetTransactionCachingTestSetup(f func(b *flow.Block, t *flow.Transaction)) { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - block := unittest.BlockFixture() - tbody := unittest.TransactionBodyFixture() - tx := unittest.TransactionFixture() - tx.TransactionBody = tbody - - suite.transactions. - On("ByID", tx.ID()). - Return(nil, storage.ErrNotFound) - - suite.state.On("AtBlockID", block.ID()).Return(snap, nil).Once() - - f(&block, &tx) - }) -} - -// TestGetTransactionResultFromCache get historic transaction result from cache -func (suite *Suite) TestGetTransactionResultFromCache() { - suite.withGetTransactionCachingTestSetup(func(block *flow.Block, tx *flow.Transaction) { - transactionResultResponse := access.TransactionResultResponse{ - Status: entities.TransactionStatus_EXECUTED, - StatusCode: uint32(entities.TransactionStatus_EXECUTED), - } - - suite.historicalAccessClient. - On("GetTransactionResult", mock.Anything, mock.AnythingOfType("*access.GetTransactionRequest")). 
- Return(&transactionResultResponse, nil).Once() - - params := suite.defaultBackendParams() - params.HistoricalAccessNodes = []access.AccessAPIClient{suite.historicalAccessClient} - params.Communicator = suite.communicator - params.TxResultCacheSize = 10 - - backend, err := New(params) - suite.Require().NoError(err) - - coll := flow.CollectionFromTransactions([]*flow.Transaction{tx}) - - resp, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status) - suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode) - - resp2, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusExecuted, resp2.Status) - suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp2.StatusCode) - - suite.historicalAccessClient.AssertExpectations(suite.T()) - }) -} - -// TestGetTransactionResultCacheNonExistent tests caches non existing result -func (suite *Suite) TestGetTransactionResultCacheNonExistent() { - suite.withGetTransactionCachingTestSetup(func(block *flow.Block, tx *flow.Transaction) { - suite.historicalAccessClient. - On("GetTransactionResult", mock.Anything, mock.AnythingOfType("*access.GetTransactionRequest")). - Return(nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", tx.ID())).Once() - - params := suite.defaultBackendParams() - params.HistoricalAccessNodes = []access.AccessAPIClient{suite.historicalAccessClient} - params.Communicator = suite.communicator - params.TxResultCacheSize = 10 - - backend, err := New(params) - suite.Require().NoError(err) - - coll := flow.CollectionFromTransactions([]*flow.Transaction{tx}) - - resp, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusUnknown, resp.Status) - suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp.StatusCode) - - // ensure the unknown transaction is cached when not found anywhere - txStatus := flow.TransactionStatusUnknown - res, ok := backend.txResultCache.Get(tx.ID()) - suite.Require().True(ok) - suite.Require().Equal(res, &accessmodel.TransactionResult{ - Status: txStatus, - StatusCode: uint(txStatus), - }) - - suite.historicalAccessClient.AssertExpectations(suite.T()) - }) -} - -// TestGetTransactionResultUnknownFromCache retrieve unknown result from cache. -func (suite *Suite) TestGetTransactionResultUnknownFromCache() { - suite.withGetTransactionCachingTestSetup(func(block *flow.Block, tx *flow.Transaction) { - suite.historicalAccessClient. - On("GetTransactionResult", mock.Anything, mock.AnythingOfType("*access.GetTransactionRequest")). 
- Return(nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", tx.ID())).Once() - - params := suite.defaultBackendParams() - params.HistoricalAccessNodes = []access.AccessAPIClient{suite.historicalAccessClient} - params.Communicator = suite.communicator - params.TxResultCacheSize = 10 - - backend, err := New(params) - suite.Require().NoError(err) - - coll := flow.CollectionFromTransactions([]*flow.Transaction{tx}) - - resp, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusUnknown, resp.Status) - suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp.StatusCode) - - // ensure the unknown transaction is cached when not found anywhere - txStatus := flow.TransactionStatusUnknown - res, ok := backend.txResultCache.Get(tx.ID()) - suite.Require().True(ok) - suite.Require().Equal(res, &accessmodel.TransactionResult{ - Status: txStatus, - StatusCode: uint(txStatus), - }) - - resp2, err := backend.GetTransactionResult( - context.Background(), - tx.ID(), - block.ID(), - coll.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - suite.Require().Equal(flow.TransactionStatusUnknown, resp2.Status) - suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp2.StatusCode) - - suite.historicalAccessClient.AssertExpectations(suite.T()) - }) -} - -// TestLookupTransactionErrorMessageByTransactionID_HappyPath verifies the lookup of a transaction error message -// by block id and transaction id. -// It tests two cases: -// 1. Happy path where the error message is fetched from the EN if it's not found in the cache. -// 2. Happy path where the error message is served from the storage database if it exists. -func (suite *Suite) TestLookupTransactionErrorMessageByTransactionID_HappyPath() { - block := unittest.BlockFixture() - blockId := block.ID() - failedTx := unittest.TransactionFixture() - failedTxId := failedTx.ID() - failedTxIndex := rand.Uint32() - - // Setup mock receipts and execution node identities. - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - params.TxResultErrorMessages = suite.txErrorMessages - - // Test case: transaction error message is fetched from the EN. - suite.Run("happy path from EN", func() { - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - - // Mock the cache lookup for the transaction error message, returning "not found". - suite.txErrorMessages.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(nil, storage.ErrNotFound).Once() - - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the execution node API call to fetch the error message. - exeEventReq := &execproto.GetTransactionErrorMessageRequest{ - BlockId: blockId[:], - TransactionId: failedTxId[:], - } - exeEventResp := &execproto.GetTransactionErrorMessageResponse{ - TransactionId: failedTxId[:], - ErrorMessage: expectedErrorMsg, - } - suite.execClient.On("GetTransactionErrorMessage", mock.Anything, exeEventReq).Return(exeEventResp, nil).Once() - - // Perform the lookup and assert that the error message is retrieved correctly. 
- errMsg, err := backend.LookupErrorMessageByTransactionID(context.Background(), blockId, block.Header.Height, failedTxId) - suite.Require().NoError(err) - suite.Require().Equal(expectedErrorMsg, errMsg) - suite.assertAllExpectations() - }) - - // Test case: transaction error message is fetched from the storage database. - suite.Run("happy path from storage db", func() { - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the cache lookup for the transaction error message, returning a stored result. - suite.txErrorMessages.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(&flow.TransactionResultErrorMessage{ - TransactionID: failedTxId, - ErrorMessage: expectedErrorMsg, - Index: failedTxIndex, - ExecutorID: unittest.IdentifierFixture(), - }, nil).Once() - - // Perform the lookup and assert that the error message is retrieved correctly from storage. - errMsg, err := backend.LookupErrorMessageByTransactionID(context.Background(), blockId, block.Header.Height, failedTxId) - suite.Require().NoError(err) - suite.Require().Equal(expectedErrorMsg, errMsg) - suite.assertAllExpectations() - }) -} - -// TestLookupTransactionErrorMessageByTransactionID_FailedToFetch tests the case when a transaction error message -// is not in the cache and needs to be fetched from the EN, but the EN fails to return it. -// It tests three cases: -// 1. The transaction is not found in the transaction results, leading to a "NotFound" error. -// 2. The transaction result is not failed, and the error message is empty. -// 3. The transaction result is failed, and the error message "failed" are returned. -func (suite *Suite) TestLookupTransactionErrorMessageByTransactionID_FailedToFetch() { - block := unittest.BlockFixture() - blockId := block.ID() - failedTx := unittest.TransactionFixture() - failedTxId := failedTx.ID() - - // Setup mock receipts and execution node identities. - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - // Create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - // The connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - // Initialize the transaction results index with the mock reporter. - params.TxResultsIndex = index.NewTransactionResultsIndex(index.NewReporter(), suite.transactionResults) - err := params.TxResultsIndex.Initialize(reporter) - suite.Require().NoError(err) - - params.TxResultErrorMessages = suite.txErrorMessages - - backend, err := New(params) - suite.Require().NoError(err) - - // Test case: failed to fetch from EN, transaction is unknown. - suite.Run("failed to fetch from EN, unknown tx", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessage", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction and tx error message is not found in the storage. - suite.txErrorMessages.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(nil, storage.ErrNotFound).Once() - suite.transactionResults.On("ByBlockIDTransactionID", blockId, failedTxId). 
- Return(nil, storage.ErrNotFound).Once() - - // Perform the lookup and expect a "NotFound" error with an empty error message. - errMsg, err := backend.LookupErrorMessageByTransactionID(context.Background(), blockId, block.Header.Height, failedTxId) - suite.Require().Error(err) - suite.Require().Equal(codes.NotFound, status.Code(err)) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is not failed. - suite.Run("failed to fetch from EN, tx result is not failed", func() { - // Lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessage", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error message is not found in storage. - suite.txErrorMessages.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction result exists and is not failed. - suite.transactionResults.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(&flow.LightTransactionResult{ - TransactionID: failedTxId, - Failed: false, - ComputationUsed: 0, - }, nil).Once() - - // Perform the lookup and expect no error and an empty error message. - errMsg, err := backend.LookupErrorMessageByTransactionID(context.Background(), blockId, block.Header.Height, failedTxId) - suite.Require().NoError(err) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is failed. - suite.Run("failed to fetch from EN, tx result is failed", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessage", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error message is not found in storage. - suite.txErrorMessages.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction result exists and is failed. - suite.transactionResults.On("ByBlockIDTransactionID", blockId, failedTxId). - Return(&flow.LightTransactionResult{ - TransactionID: failedTxId, - Failed: true, - ComputationUsed: 0, - }, nil).Once() - - // Perform the lookup and expect the failed error message to be returned. - errMsg, err := backend.LookupErrorMessageByTransactionID(context.Background(), blockId, block.Header.Height, failedTxId) - suite.Require().NoError(err) - suite.Require().Equal(errMsg, DefaultFailedErrorMessage) - suite.assertAllExpectations() - }) -} - -// TestLookupTransactionErrorMessageByIndex_HappyPath verifies the lookup of a transaction error message -// by block ID and transaction index. -// It tests two cases: -// 1. Happy path where the error message is fetched from the EN if it is not found in the cache. -// 2. Happy path where the error message is served from the storage database if it exists. -func (suite *Suite) TestLookupTransactionErrorMessageByIndex_HappyPath() { - block := unittest.BlockFixture() - blockId := block.ID() - failedTx := unittest.TransactionFixture() - failedTxId := failedTx.ID() - failedTxIndex := rand.Uint32() - - // Setup mock receipts and execution node identities. 
- _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - params.TxResultErrorMessages = suite.txErrorMessages - - // Test case: transaction error message is fetched from the EN. - suite.Run("happy path from EN", func() { - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - - // Mock the cache lookup for the transaction error message, returning "not found". - suite.txErrorMessages.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(nil, storage.ErrNotFound).Once() - - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the execution node API call to fetch the error message. - exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ - BlockId: blockId[:], - Index: failedTxIndex, - } - exeEventResp := &execproto.GetTransactionErrorMessageResponse{ - TransactionId: failedTxId[:], - ErrorMessage: expectedErrorMsg, - } - suite.execClient.On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq).Return(exeEventResp, nil).Once() - - // Perform the lookup and assert that the error message is retrieved correctly. - errMsg, err := backend.LookupErrorMessageByIndex(context.Background(), blockId, block.Header.Height, failedTxIndex) - suite.Require().NoError(err) - suite.Require().Equal(expectedErrorMsg, errMsg) - suite.assertAllExpectations() - }) - - // Test case: transaction error message is fetched from the storage database. - suite.Run("happy path from storage db", func() { - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the cache lookup for the transaction error message, returning a stored result. - suite.txErrorMessages.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(&flow.TransactionResultErrorMessage{ - TransactionID: failedTxId, - ErrorMessage: expectedErrorMsg, - Index: failedTxIndex, - ExecutorID: unittest.IdentifierFixture(), - }, nil).Once() - - // Perform the lookup and assert that the error message is retrieved correctly from storage. - errMsg, err := backend.LookupErrorMessageByIndex(context.Background(), blockId, block.Header.Height, failedTxIndex) - suite.Require().NoError(err) - suite.Require().Equal(expectedErrorMsg, errMsg) - suite.assertAllExpectations() - }) -} - -// TestLookupTransactionErrorMessageByIndex_FailedToFetch verifies the behavior of looking up a transaction error message by index -// when the error message is not in the cache, and fetching it from the EN fails. -// It tests three cases: -// 1. The transaction is not found in the transaction results, leading to a "NotFound" error. -// 2. The transaction result is not failed, and the error message is empty. -// 3. The transaction result is failed, and the error message "failed" are returned. -func (suite *Suite) TestLookupTransactionErrorMessageByIndex_FailedToFetch() { - block := unittest.BlockFixture() - blockId := block.ID() - failedTxIndex := rand.Uint32() - failedTx := unittest.TransactionFixture() - failedTxId := failedTx.ID() - - // Setup mock receipts and execution node identities. 
- _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - // Create a mock connection factory - connFactory := connectionmock.NewConnectionFactory(suite.T()) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mocks.MockCloser{}, nil) - - // Create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - // the connection factory should be used to get the execution node client - params.ConnFactory = connFactory - // Initialize the transaction results index with the mock reporter. - params.TxResultsIndex = index.NewTransactionResultsIndex(index.NewReporter(), suite.transactionResults) - err := params.TxResultsIndex.Initialize(reporter) - suite.Require().NoError(err) - - params.TxResultErrorMessages = suite.txErrorMessages - - backend, err := New(params) - suite.Require().NoError(err) - - // Test case: failed to fetch from EN, transaction is unknown. - suite.Run("failed to fetch from EN, unknown tx", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction and tx error message is not found in the storage. - suite.txErrorMessages.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(nil, storage.ErrNotFound).Once() - suite.transactionResults.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(nil, storage.ErrNotFound).Once() - - // Perform the lookup and expect a "NotFound" error with an empty error message. - errMsg, err := backend.LookupErrorMessageByIndex(context.Background(), blockId, block.Header.Height, failedTxIndex) - suite.Require().Error(err) - suite.Require().Equal(codes.NotFound, status.Code(err)) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is not failed. - suite.Run("failed to fetch from EN, tx result is not failed", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error message is not found in storage. - suite.txErrorMessages.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction result exists and is not failed. - suite.transactionResults.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(&flow.LightTransactionResult{ - TransactionID: failedTxId, - Failed: false, - ComputationUsed: 0, - }, nil).Once() - - // Perform the lookup and expect no error and an empty error message. - errMsg, err := backend.LookupErrorMessageByIndex(context.Background(), blockId, block.Header.Height, failedTxIndex) - suite.Require().NoError(err) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is failed. 
- suite.Run("failed to fetch from EN, tx result is failed", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error message is not found in storage. - suite.txErrorMessages.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction result exists and is failed. - suite.transactionResults.On("ByBlockIDTransactionIndex", blockId, failedTxIndex). - Return(&flow.LightTransactionResult{ - TransactionID: failedTxId, - Failed: true, - ComputationUsed: 0, - }, nil).Once() - - // Perform the lookup and expect the failed error message to be returned. - errMsg, err := backend.LookupErrorMessageByIndex(context.Background(), blockId, block.Header.Height, failedTxIndex) - suite.Require().NoError(err) - suite.Require().Equal(errMsg, DefaultFailedErrorMessage) - suite.assertAllExpectations() - }) -} - -// TestLookupTransactionErrorMessagesByBlockID_HappyPath verifies the lookup of transaction error messages by block ID. -// It tests two cases: -// 1. Happy path where the error messages are fetched from the EN if they are not found in the cache. -// 2. Happy path where the error messages are served from the storage database if they exist. -func (suite *Suite) TestLookupTransactionErrorMessagesByBlockID_HappyPath() { - block := unittest.BlockFixture() - blockId := block.ID() - - resultsByBlockID := make([]flow.LightTransactionResult, 0) - for i := 0; i < 5; i++ { - resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ - TransactionID: unittest.IdentifierFixture(), - Failed: i%2 == 0, // create a mix of failed and non-failed transactions - ComputationUsed: 0, - }) - } - - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - params.TxResultErrorMessages = suite.txErrorMessages - - // Test case: transaction error messages is fetched from the EN. - suite.Run("happy path from EN", func() { - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - - // Mock the cache lookup for the transaction error messages, returning "not found". - suite.txErrorMessages.On("ByBlockID", blockId). - Return(nil, storage.ErrNotFound).Once() - - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the execution node API call to fetch the error messages. - exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ - BlockId: blockId[:], - } - exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} - for _, result := range resultsByBlockID { - r := result - if r.Failed { - errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, r.TransactionID) - exeErrMessagesResp.Results = append(exeErrMessagesResp.Results, &execproto.GetTransactionErrorMessagesResponse_Result{ - TransactionId: r.TransactionID[:], - ErrorMessage: errMsg, - }) - } - } - suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). - Return(exeErrMessagesResp, nil). - Once() - - // Perform the lookup and assert that the error message is retrieved correctly. 
- errMessages, err := backend.LookupErrorMessagesByBlockID(context.Background(), blockId, block.Header.Height) - suite.Require().NoError(err) - suite.Require().Len(errMessages, len(exeErrMessagesResp.Results)) - for _, expectedResult := range exeErrMessagesResp.Results { - errMsg, ok := errMessages[convert.MessageToIdentifier(expectedResult.TransactionId)] - suite.Require().True(ok) - suite.Assert().Equal(expectedResult.ErrorMessage, errMsg) - } - suite.assertAllExpectations() - }) - - // Test case: transaction error messages is fetched from the storage database. - suite.Run("happy path from storage db", func() { - backend, err := New(params) - suite.Require().NoError(err) - - // Mock the cache lookup for the transaction error messages, returning a stored result. - var txErrorMessages []flow.TransactionResultErrorMessage - for i, result := range resultsByBlockID { - if result.Failed { - errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, result.TransactionID) - - txErrorMessages = append(txErrorMessages, - flow.TransactionResultErrorMessage{ - TransactionID: result.TransactionID, - ErrorMessage: errMsg, - Index: uint32(i), - ExecutorID: unittest.IdentifierFixture(), - }) - } - } - suite.txErrorMessages.On("ByBlockID", blockId). - Return(txErrorMessages, nil).Once() - - // Perform the lookup and assert that the error message is retrieved correctly from storage. - errMessages, err := backend.LookupErrorMessagesByBlockID(context.Background(), blockId, block.Header.Height) - suite.Require().NoError(err) - suite.Require().Len(errMessages, len(txErrorMessages)) - for _, expected := range txErrorMessages { - errMsg, ok := errMessages[expected.TransactionID] - suite.Require().True(ok) - suite.Assert().Equal(expected.ErrorMessage, errMsg) - } - suite.assertAllExpectations() - }) -} - -// TestLookupTransactionErrorMessagesByBlockID_FailedToFetch tests lookup of a transaction error messages by block ID, -// when a transaction result is not in the cache and needs to be fetched from EN, but the EN fails to return it. -// It tests three cases: -// 1. The transaction is not found in the transaction results, leading to a "NotFound" error. -// 2. The transaction result is not failed, and the error message is empty. -// 3. The transaction result is failed, and the error message "failed" are returned. -func (suite *Suite) TestLookupTransactionErrorMessagesByBlockID_FailedToFetch() { - block := unittest.BlockFixture() - blockId := block.ID() - - // Setup mock receipts and execution node identities. - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - - // Create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - params := suite.defaultBackendParams() - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - // Initialize the transaction results index with the mock reporter. 
- params.TxResultsIndex = index.NewTransactionResultsIndex(index.NewReporter(), suite.transactionResults) - err := params.TxResultsIndex.Initialize(reporter) - suite.Require().NoError(err) - - params.TxResultErrorMessages = suite.txErrorMessages - - backend, err := New(params) - suite.Require().NoError(err) - - // Test case: failed to fetch from EN, transaction is unknown. - suite.Run("failed to fetch from EN, unknown tx", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction and tx error messages is not found in the storage. - suite.txErrorMessages.On("ByBlockID", blockId). - Return(nil, storage.ErrNotFound).Once() - suite.transactionResults.On("ByBlockID", blockId). - Return(nil, storage.ErrNotFound).Once() - - // Perform the lookup and expect a "NotFound" error with an empty error message. - errMsg, err := backend.LookupErrorMessagesByBlockID(context.Background(), blockId, block.Header.Height) - suite.Require().Error(err) - suite.Require().Equal(codes.NotFound, status.Code(err)) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is not failed. - suite.Run("failed to fetch from EN, tx result is not failed", func() { - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error message is not found in storage. - suite.txErrorMessages.On("ByBlockID", blockId). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction results exists and is not failed. - suite.transactionResults.On("ByBlockID", blockId). - Return([]flow.LightTransactionResult{ - { - TransactionID: unittest.IdentifierFixture(), - Failed: false, - ComputationUsed: 0, - }, - { - TransactionID: unittest.IdentifierFixture(), - Failed: false, - ComputationUsed: 0, - }, - }, nil).Once() - - // Perform the lookup and expect no error and an empty error messages. - errMsg, err := backend.LookupErrorMessagesByBlockID(context.Background(), blockId, block.Header.Height) - suite.Require().NoError(err) - suite.Require().Empty(errMsg) - suite.assertAllExpectations() - }) - - // Test case: failed to fetch from EN, but the transaction result is failed. - suite.Run("failed to fetch from EN, tx result is failed", func() { - failedResultsByBlockID := []flow.LightTransactionResult{ - { - TransactionID: unittest.IdentifierFixture(), - Failed: true, - ComputationUsed: 0, - }, - { - TransactionID: unittest.IdentifierFixture(), - Failed: true, - ComputationUsed: 0, - }, - } - - // lookup should try each of the 2 ENs in fixedENIDs - suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).Return(nil, - status.Error(codes.Unavailable, "")).Twice() - - // Setup mock that the transaction error messages is not found in storage. - suite.txErrorMessages.On("ByBlockID", blockId). - Return(nil, storage.ErrNotFound).Once() - - // Setup mock that the transaction results exists and is failed. - suite.transactionResults.On("ByBlockID", blockId). - Return(failedResultsByBlockID, nil).Once() - - // Setup mock expected the transaction error messages after retrieving the failed result. 
- expectedTxErrorMessages := make(map[flow.Identifier]string) - for _, result := range failedResultsByBlockID { - if result.Failed { - expectedTxErrorMessages[result.TransactionID] = DefaultFailedErrorMessage - } - } - - // Perform the lookup and expect the failed error messages to be returned. - errMsg, err := backend.LookupErrorMessagesByBlockID(context.Background(), blockId, block.Header.Height) - suite.Require().NoError(err) - suite.Require().Len(errMsg, len(expectedTxErrorMessages)) - for txID, expectedMessage := range expectedTxErrorMessages { - actualMessage, ok := errMsg[txID] - suite.Require().True(ok) - suite.Assert().Equal(expectedMessage, actualMessage) - } - suite.assertAllExpectations() - }) -} - -// TestGetSystemTransaction_HappyPath tests that GetSystemTransaction call returns system chunk transaction. -func (suite *Suite) TestGetSystemTransaction_HappyPath() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - suite.state.On("Sealed").Return(snap, nil).Maybe() - - params := suite.defaultBackendParams() - backend, err := New(params) - suite.Require().NoError(err) - - block := unittest.BlockFixture() - blockID := block.ID() - - // Make the call for the system chunk transaction - res, err := backend.GetSystemTransaction(context.Background(), blockID) - suite.Require().NoError(err) - // Expected system chunk transaction - systemTx, err := blueprints.SystemChunkTransaction(suite.chainID.Chain()) - suite.Require().NoError(err) - - suite.Require().Equal(systemTx, res) - }) -} - -// TestGetSystemTransactionResult_HappyPath tests that GetSystemTransactionResult call returns system transaction -// result for required block id. -func (suite *Suite) TestGetSystemTransactionResult_HappyPath() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - suite.state.On("Sealed").Return(snap, nil).Maybe() - lastBlock, err := snap.Head() - suite.Require().NoError(err) - identities, err := snap.Identities(filter.Any) - suite.Require().NoError(err) - - block := unittest.BlockWithParentFixture(lastBlock) - blockID := block.ID() - suite.state.On("AtBlockID", blockID).Return( - unittest.StateSnapshotForKnownBlock(block.Header, identities.Lookup()), nil).Once() - - // block storage returns the corresponding block - suite.blocks. - On("ByID", blockID). - Return(block, nil). - Once() - - receipt1 := unittest.ReceiptForBlockFixture(block) - suite.receipts. - On("ByBlockID", block.ID()). - Return(flow.ExecutionReceiptList{receipt1}, nil) - - // the connection factory should be used to get the execution node client - params := suite.defaultBackendParams() - params.ConnFactory = suite.setupConnectionFactory() - - exeEventReq := &execproto.GetTransactionsByBlockIDRequest{ - BlockId: blockID[:], - } - - // Generating events with event generator - exeNodeEventEncodingVersion := entities.EventEncodingVersion_CCF_V0 - events := generator.GetEventsWithEncoding(1, exeNodeEventEncodingVersion) - eventMessages := convert.EventsToMessages(events) - - exeEventResp := &execproto.GetTransactionResultsResponse{ - TransactionResults: []*execproto.GetTransactionResultResponse{{ - Events: eventMessages, - EventEncodingVersion: exeNodeEventEncodingVersion, - }}, - EventEncodingVersion: exeNodeEventEncodingVersion, - } - - suite.execClient. - On("GetTransactionResultsByBlockID", mock.Anything, exeEventReq). - Return(exeEventResp, nil). - Once() - - backend, err := New(params) - suite.Require().NoError(err) - - suite.execClient. 
- On("GetTransactionResult", mock.Anything, mock.AnythingOfType("*execution.GetTransactionResultRequest")). - Return(exeEventResp.TransactionResults[0], nil). - Once() - - // Make the call for the system transaction result - res, err := backend.GetSystemTransactionResult( - context.Background(), - block.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.Require().NoError(err) - - // Expected system chunk transaction - suite.Require().Equal(flow.TransactionStatusExecuted, res.Status) - suite.Require().Equal(suite.systemTx.ID(), res.TransactionID) - - // Check for successful decoding of event - _, err = jsoncdc.Decode(nil, res.Events[0].Payload) - suite.Require().NoError(err) - - events, err = convert.MessagesToEventsWithEncodingConversion(eventMessages, - exeNodeEventEncodingVersion, - entities.EventEncodingVersion_JSON_CDC_V0) - suite.Require().NoError(err) - suite.Require().Equal(events, res.Events) - }) -} - -func (suite *Suite) TestGetSystemTransactionResultFromStorage() { - // Create fixtures for block, transaction, and collection - block := unittest.BlockFixture() - sysTx, err := blueprints.SystemChunkTransaction(suite.chainID.Chain()) - suite.Require().NoError(err) - suite.Require().NotNil(sysTx) - transaction := flow.Transaction{TransactionBody: *sysTx} - txId := suite.systemTx.ID() - blockId := block.ID() - - // Mock the behavior of the blocks and transactionResults objects - suite.blocks. - On("ByID", blockId). - Return(&block, nil). - Once() - - lightTxShouldFail := false - suite.transactionResults. - On("ByBlockIDTransactionID", blockId, txId). - Return(&flow.LightTransactionResult{ - TransactionID: txId, - Failed: lightTxShouldFail, - ComputationUsed: 0, - }, nil). - Once() - - suite.transactions. - On("ByID", txId). - Return(&transaction.TransactionBody, nil). - Once() - - // Set up the events storage mock - var eventsForTx []flow.Event - // expect a call to lookup events by block ID and transaction ID - suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil) - - // Set up the state and snapshot mocks - suite.state.On("Final").Return(suite.snapshot, nil).Once() - suite.state.On("Sealed").Return(suite.snapshot, nil).Once() - suite.snapshot.On("Head", mock.Anything).Return(block.Header, nil).Once() - - // create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - indexReporter := index.NewReporter() - err = indexReporter.Initialize(reporter) - suite.Require().NoError(err) - - // Set up the backend parameters and the backend instance - params := suite.defaultBackendParams() - params.TxResultQueryMode = IndexQueryModeLocalOnly - - params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events) - params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.transactionResults) - - backend, err := New(params) - suite.Require().NoError(err) - - response, err := backend.GetSystemTransactionResult(context.Background(), blockId, entities.EventEncodingVersion_JSON_CDC_V0) - suite.assertTransactionResultResponse(err, response, block, txId, lightTxShouldFail, eventsForTx) -} - -// TestGetSystemTransactionResult_BlockNotFound tests GetSystemTransactionResult function when block was not found. 
-func (suite *Suite) TestGetSystemTransactionResult_BlockNotFound() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - suite.state.On("Sealed").Return(snap, nil).Maybe() - lastBlock, err := snap.Head() - suite.Require().NoError(err) - identities, err := snap.Identities(filter.Any) - suite.Require().NoError(err) - - block := unittest.BlockWithParentFixture(lastBlock) - blockID := block.ID() - suite.state.On("AtBlockID", blockID).Return( - unittest.StateSnapshotForKnownBlock(block.Header, identities.Lookup()), nil).Once() - - // block storage returns the ErrNotFound error - suite.blocks. - On("ByID", blockID). - Return(nil, storage.ErrNotFound). - Once() - - receipt1 := unittest.ReceiptForBlockFixture(block) - suite.receipts. - On("ByBlockID", block.ID()). - Return(flow.ExecutionReceiptList{receipt1}, nil) - - params := suite.defaultBackendParams() - - backend, err := New(params) - suite.Require().NoError(err) - - // Make the call for the system transaction result - res, err := backend.GetSystemTransactionResult( - context.Background(), - block.ID(), - entities.EventEncodingVersion_JSON_CDC_V0, - ) - - suite.Require().Nil(res) - suite.Require().Error(err) - suite.Require().Equal(err, status.Errorf(codes.NotFound, "not found: %v", fmt.Errorf("key not found"))) - }) -} - -// TestGetSystemTransactionResult_FailedEncodingConversion tests the GetSystemTransactionResult function with different -// event encoding versions. -func (suite *Suite) TestGetSystemTransactionResult_FailedEncodingConversion() { - suite.withPreConfiguredState(func(snap protocol.Snapshot) { - suite.state.On("Sealed").Return(snap, nil).Maybe() - lastBlock, err := snap.Head() - suite.Require().NoError(err) - identities, err := snap.Identities(filter.Any) - suite.Require().NoError(err) - - block := unittest.BlockWithParentFixture(lastBlock) - blockID := block.ID() - suite.state.On("AtBlockID", blockID).Return( - unittest.StateSnapshotForKnownBlock(block.Header, identities.Lookup()), nil).Once() - - // block storage returns the corresponding block - suite.blocks. - On("ByID", blockID). - Return(block, nil). - Once() - - receipt1 := unittest.ReceiptForBlockFixture(block) - suite.receipts. - On("ByBlockID", block.ID()). - Return(flow.ExecutionReceiptList{receipt1}, nil) - - // the connection factory should be used to get the execution node client - params := suite.defaultBackendParams() - params.ConnFactory = suite.setupConnectionFactory() - - exeEventReq := &execproto.GetTransactionsByBlockIDRequest{ - BlockId: blockID[:], - } - - // create empty events - eventsPerBlock := 10 - eventMessages := make([]*entities.Event, eventsPerBlock) - - exeEventResp := &execproto.GetTransactionResultsResponse{ - TransactionResults: []*execproto.GetTransactionResultResponse{{ - Events: eventMessages, - EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, - }}, - } - - suite.execClient. - On("GetTransactionResultsByBlockID", mock.Anything, exeEventReq). - Return(exeEventResp, nil). - Once() - - backend, err := New(params) - suite.Require().NoError(err) - - suite.execClient. - On("GetTransactionResult", mock.Anything, mock.AnythingOfType("*execution.GetTransactionResultRequest")). - Return(exeEventResp.TransactionResults[0], nil). 
- Once() - - // Make the call for the system transaction result - res, err := backend.GetSystemTransactionResult( - context.Background(), - block.ID(), - entities.EventEncodingVersion_CCF_V0, - ) - - suite.Require().Nil(res) - suite.Require().Error(err) - suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to convert events to message: %v", - fmt.Errorf("conversion from format JSON_CDC_V0 to CCF_V0 is not supported"))) - }) -} - -func (suite *Suite) assertTransactionResultResponse( - err error, - response *accessmodel.TransactionResult, - block flow.Block, - txId flow.Identifier, - txFailed bool, - eventsForTx []flow.Event, -) { - suite.Require().NoError(err) - suite.Assert().Equal(block.ID(), response.BlockID) - suite.Assert().Equal(block.Header.Height, response.BlockHeight) - suite.Assert().Equal(txId, response.TransactionID) - if txId == suite.systemTx.ID() { - suite.Assert().Equal(flow.ZeroID, response.CollectionID) - } else { - suite.Assert().Equal(block.Payload.Guarantees[0].CollectionID, response.CollectionID) - } - suite.Assert().Equal(len(eventsForTx), len(response.Events)) - // When there are error messages occurred in the transaction, the status should be 1 - if txFailed { - suite.Assert().Equal(uint(1), response.StatusCode) - suite.Assert().Equal(expectedErrorMsg, response.ErrorMessage) - } else { - suite.Assert().Equal(uint(0), response.StatusCode) - suite.Assert().Equal("", response.ErrorMessage) - } - suite.Assert().Equal(flow.TransactionStatusSealed, response.Status) -} - -// TestTransactionResultFromStorage tests the retrieval of a transaction result (flow.TransactionResult) from storage -// instead of requesting it from the Execution Node. -func (suite *Suite) TestTransactionResultFromStorage() { - // Create fixtures for block, transaction, and collection - block := unittest.BlockFixture() - transaction := unittest.TransactionFixture() - col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) - guarantee := col.Guarantee() - block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) - txId := transaction.ID() - blockId := block.ID() - - // Mock the behavior of the blocks and transactionResults objects - suite.blocks. - On("ByID", blockId). - Return(&block, nil) - - suite.transactionResults.On("ByBlockIDTransactionID", blockId, txId). - Return(&flow.LightTransactionResult{ - TransactionID: txId, - Failed: true, - ComputationUsed: 0, - }, nil) - - suite.transactions. - On("ByID", txId). 
- Return(&transaction.TransactionBody, nil) - - // Set up the light collection and mock the behavior of the collections object - lightCol := col.Light() - suite.collections.On("LightByID", col.ID()).Return(&lightCol, nil) - - // Set up the events storage mock - totalEvents := 5 - eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) - eventMessages := make([]*entities.Event, totalEvents) - for j, event := range eventsForTx { - eventMessages[j] = convert.EventToMessage(event) - } - - // expect a call to lookup events by block ID and transaction ID - suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil) - - // Set up the state and snapshot mocks - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - suite.snapshot.On("Head", mock.Anything).Return(block.Header, nil) - - // create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - indexReporter := index.NewReporter() - err := indexReporter.Initialize(reporter) - suite.Require().NoError(err) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - // Set up the backend parameters and the backend instance - params := suite.defaultBackendParams() - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - params.TxResultQueryMode = IndexQueryModeLocalOnly - params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events) - params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.transactionResults) - - backend, err := New(params) - suite.Require().NoError(err) - - // Set up the expected error message for the execution node response - - exeEventReq := &execproto.GetTransactionErrorMessageRequest{ - BlockId: blockId[:], - TransactionId: txId[:], - } - - exeEventResp := &execproto.GetTransactionErrorMessageResponse{ - TransactionId: txId[:], - ErrorMessage: expectedErrorMsg, - } - - suite.execClient.On("GetTransactionErrorMessage", mock.Anything, exeEventReq).Return(exeEventResp, nil).Once() - - response, err := backend.GetTransactionResult(context.Background(), txId, blockId, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0) - suite.assertTransactionResultResponse(err, response, block, txId, true, eventsForTx) -} - -// TestTransactionByIndexFromStorage tests the retrieval of a transaction result (flow.TransactionResult) by index -// and returns it from storage instead of requesting from the Execution Node. -func (suite *Suite) TestTransactionByIndexFromStorage() { - // Create fixtures for block, transaction, and collection - block := unittest.BlockFixture() - transaction := unittest.TransactionFixture() - col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) - guarantee := col.Guarantee() - block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) - blockId := block.ID() - txId := transaction.ID() - txIndex := rand.Uint32() - - // Set up the light collection and mock the behavior of the collections object - lightCol := col.Light() - suite.collections.On("LightByID", col.ID()).Return(&lightCol, nil) - - // Mock the behavior of the blocks and transactionResults objects - suite.blocks. 
- On("ByID", blockId). - Return(&block, nil) - - suite.transactionResults.On("ByBlockIDTransactionIndex", blockId, txIndex). - Return(&flow.LightTransactionResult{ - TransactionID: txId, - Failed: true, - ComputationUsed: 0, - }, nil) - - // Set up the events storage mock - totalEvents := 5 - eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) - eventMessages := make([]*entities.Event, totalEvents) - for j, event := range eventsForTx { - eventMessages[j] = convert.EventToMessage(event) - } - - // expect a call to lookup events by block ID and transaction ID - suite.events.On("ByBlockIDTransactionIndex", blockId, txIndex).Return(eventsForTx, nil) - - // Set up the state and snapshot mocks - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - suite.snapshot.On("Head", mock.Anything).Return(block.Header, nil) - - // create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - indexReporter := index.NewReporter() - err := indexReporter.Initialize(reporter) - suite.Require().NoError(err) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - // Set up the backend parameters and the backend instance - params := suite.defaultBackendParams() - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - params.TxResultQueryMode = IndexQueryModeLocalOnly - params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events) - params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.transactionResults) - - backend, err := New(params) - suite.Require().NoError(err) - - // Set up the expected error message for the execution node response - exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ - BlockId: blockId[:], - Index: txIndex, - } - - exeEventResp := &execproto.GetTransactionErrorMessageResponse{ - TransactionId: txId[:], - ErrorMessage: expectedErrorMsg, - } - - suite.execClient.On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq).Return(exeEventResp, nil).Once() - - response, err := backend.GetTransactionResultByIndex(context.Background(), blockId, txIndex, entities.EventEncodingVersion_JSON_CDC_V0) - suite.assertTransactionResultResponse(err, response, block, txId, true, eventsForTx) -} - -// TestTransactionResultsByBlockIDFromStorage tests the retrieval of transaction results ([]flow.TransactionResult) -// by block ID from storage instead of requesting from the Execution Node. -func (suite *Suite) TestTransactionResultsByBlockIDFromStorage() { - // Create fixtures for the block and collection - block := unittest.BlockFixture() - col := unittest.CollectionFixture(2) - guarantee := col.Guarantee() - block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) - blockId := block.ID() - - // Mock the behavior of the blocks, collections and light transaction results objects - suite.blocks. - On("ByID", blockId). 
- Return(&block, nil) - lightCol := col.Light() - suite.collections.On("LightByID", mock.Anything).Return(&lightCol, nil) - - lightTxResults := make([]flow.LightTransactionResult, len(lightCol.Transactions)) - for i, txID := range lightCol.Transactions { - lightTxResults[i] = flow.LightTransactionResult{ - TransactionID: txID, - Failed: false, - ComputationUsed: 0, - } - } - // simulate the system tx - lightTxResults = append(lightTxResults, flow.LightTransactionResult{ - TransactionID: suite.systemTx.ID(), - Failed: false, - ComputationUsed: 10, - }) - - // Mark the first transaction as failed - lightTxResults[0].Failed = true - suite.transactionResults.On("ByBlockID", blockId).Return(lightTxResults, nil) - - // Set up the events storage mock - totalEvents := 5 - eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) - eventMessages := make([]*entities.Event, totalEvents) - for j, event := range eventsForTx { - eventMessages[j] = convert.EventToMessage(event) - } - - // expect a call to lookup events by block ID and transaction ID - suite.events.On("ByBlockIDTransactionID", blockId, mock.Anything).Return(eventsForTx, nil) - - // Set up the state and snapshot mocks - _, fixedENIDs := suite.setupReceipts(&block) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - suite.snapshot.On("Head", mock.Anything).Return(block.Header, nil) - - // create a mock index reporter - reporter := syncmock.NewIndexReporter(suite.T()) - reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) - reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) - - indexReporter := index.NewReporter() - err := indexReporter.Initialize(reporter) - suite.Require().NoError(err) - - suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - - // Set up the state and snapshot mocks and the backend instance - params := suite.defaultBackendParams() - // the connection factory should be used to get the execution node client - params.ConnFactory = suite.setupConnectionFactory() - params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events) - params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.transactionResults) - params.TxResultQueryMode = IndexQueryModeLocalOnly - - backend, err := New(params) - suite.Require().NoError(err) - - // Set up the expected error message for the execution node response - exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ - BlockId: blockId[:], - } - - res := &execproto.GetTransactionErrorMessagesResponse_Result{ - TransactionId: lightTxResults[0].TransactionID[:], - ErrorMessage: expectedErrorMsg, - Index: 1, - } - exeEventResp := &execproto.GetTransactionErrorMessagesResponse{ - Results: []*execproto.GetTransactionErrorMessagesResponse_Result{ - res, - }, - } - - suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq).Return(exeEventResp, nil).Once() - - response, err := backend.GetTransactionResultsByBlockID(context.Background(), blockId, entities.EventEncodingVersion_JSON_CDC_V0) - suite.Require().NoError(err) - suite.Assert().Equal(len(lightTxResults), len(response)) - - // Assertions for each transaction result in the response - for i, responseResult := range response { - lightTx := lightTxResults[i] - suite.assertTransactionResultResponse(err, responseResult, block, lightTx.TransactionID, lightTx.Failed, eventsForTx) - } -} 
diff --git a/engine/access/rpc/backend/common/consts.go b/engine/access/rpc/backend/common/consts.go new file mode 100644 index 00000000000..f4a379a8e00 --- /dev/null +++ b/engine/access/rpc/backend/common/consts.go @@ -0,0 +1,5 @@ +package common + +// DefaultLoggedScriptsCacheSize is the default size of the lookup cache used to dedupe logs of scripts sent to ENs +// limiting cache size to 16MB and does not affect script execution, only for keeping logs tidy +const DefaultLoggedScriptsCacheSize = 1_000_000 diff --git a/engine/access/rpc/backend/errors.go b/engine/access/rpc/backend/common/errors.go similarity index 65% rename from engine/access/rpc/backend/errors.go rename to engine/access/rpc/backend/common/errors.go index 4752c6563ce..51de9fbfcba 100644 --- a/engine/access/rpc/backend/errors.go +++ b/engine/access/rpc/backend/common/errors.go @@ -1,4 +1,4 @@ -package backend +package common import ( "errors" @@ -7,12 +7,18 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// InsufficientExecutionReceipts indicates that no execution receipt were found for a given block ID +// InsufficientExecutionReceipts indicates that no execution receipts were found for a given block ID type InsufficientExecutionReceipts struct { blockID flow.Identifier receiptCount int } +func NewInsufficientExecutionReceipts(blockID flow.Identifier, receiptCount int) InsufficientExecutionReceipts { + return InsufficientExecutionReceipts{blockID: blockID, receiptCount: receiptCount} +} + +var _ error = (*InsufficientExecutionReceipts)(nil) + func (e InsufficientExecutionReceipts) Error() string { return fmt.Sprintf("insufficient execution receipts found (%d) for block ID: %s", e.receiptCount, e.blockID.String()) } diff --git a/engine/access/rpc/backend/common/height_error.go b/engine/access/rpc/backend/common/height_error.go new file mode 100644 index 00000000000..d9759cc7f2a --- /dev/null +++ b/engine/access/rpc/backend/common/height_error.go @@ -0,0 +1,45 @@ +package common + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// ResolveHeightError processes errors returned during height-based queries. +// If the error is due to a block not being found, this function determines whether the queried +// height falls outside the node's accessible range and provides context-sensitive error messages +// based on spork and node root block heights. +// +// Expected errors during normal operation: +// - storage.ErrNotFound - Indicates that the queried block does not exist in the local database. +func ResolveHeightError( + stateParams protocol.Params, + height uint64, + genericErr error, +) error { + if !errors.Is(genericErr, storage.ErrNotFound) { + return genericErr + } + + sporkRootBlockHeight := stateParams.SporkRootBlockHeight() + nodeRootBlockHeader := stateParams.SealedRoot().Height + + if height < sporkRootBlockHeight { + return fmt.Errorf("block height %d is less than the spork root block height %d. Try to use a historic node: %w", + height, + sporkRootBlockHeight, + genericErr, + ) + } else if height < nodeRootBlockHeader { + return fmt.Errorf("block height %d is less than the node's root block height %d. 
Try to use a different Access node: %w", + height, + nodeRootBlockHeader, + genericErr, + ) + } else { + return genericErr + } +} diff --git a/engine/access/rpc/backend/config.go b/engine/access/rpc/backend/config.go index 086af0035a7..cc4f4d157fe 100644 --- a/engine/access/rpc/backend/config.go +++ b/engine/access/rpc/backend/config.go @@ -1,7 +1,6 @@ package backend import ( - "errors" "time" "github.com/onflow/flow-go/engine/access/rpc/connection" @@ -20,53 +19,3 @@ type Config struct { EventQueryMode string // the mode in which events are queried TxResultQueryMode string // the mode in which tx results are queried } - -type IndexQueryMode int - -const ( - // IndexQueryModeLocalOnly executes scripts and gets accounts using only local storage - IndexQueryModeLocalOnly IndexQueryMode = iota + 1 - - // IndexQueryModeExecutionNodesOnly executes scripts and gets accounts using only - // execution nodes - IndexQueryModeExecutionNodesOnly - - // IndexQueryModeFailover executes scripts and gets accounts using local storage first, - // then falls back to execution nodes if data is not available for the height or if request - // failed due to a non-user error. - IndexQueryModeFailover - - // IndexQueryModeCompare executes scripts and gets accounts using both local storage and - // execution nodes and compares the results. The execution node result is always returned. - IndexQueryModeCompare -) - -func ParseIndexQueryMode(s string) (IndexQueryMode, error) { - switch s { - case IndexQueryModeLocalOnly.String(): - return IndexQueryModeLocalOnly, nil - case IndexQueryModeExecutionNodesOnly.String(): - return IndexQueryModeExecutionNodesOnly, nil - case IndexQueryModeFailover.String(): - return IndexQueryModeFailover, nil - case IndexQueryModeCompare.String(): - return IndexQueryModeCompare, nil - default: - return 0, errors.New("invalid script execution mode") - } -} - -func (m IndexQueryMode) String() string { - switch m { - case IndexQueryModeLocalOnly: - return "local-only" - case IndexQueryModeExecutionNodesOnly: - return "execution-nodes-only" - case IndexQueryModeFailover: - return "failover" - case IndexQueryModeCompare: - return "compare" - default: - return "" - } -} diff --git a/engine/access/rpc/backend/events/events.go b/engine/access/rpc/backend/events/events.go new file mode 100644 index 00000000000..4b98a4239b1 --- /dev/null +++ b/engine/access/rpc/backend/events/events.go @@ -0,0 +1,197 @@ +package events + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/events" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// DefaultMaxHeightRange is the default maximum size of range requests. 
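+// Requests spanning more blocks (or block IDs) than the configured maximum are rejected with an InvalidArgument error.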
+const DefaultMaxHeightRange = 250 + +type Events struct { + headers storage.Headers + state protocol.State + chain flow.Chain + maxHeightRange uint + provider provider.EventProvider +} + +var _ access.EventsAPI = (*Events)(nil) + +func NewEventsBackend( + log zerolog.Logger, + state protocol.State, + chain flow.Chain, + maxHeightRange uint, + headers storage.Headers, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + queryMode query_mode.IndexQueryMode, + eventsIndex *index.EventsIndex, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, +) (*Events, error) { + var eventProvider provider.EventProvider + + switch queryMode { + case query_mode.IndexQueryModeLocalOnly: + eventProvider = provider.NewLocalEventProvider(eventsIndex) + + case query_mode.IndexQueryModeExecutionNodesOnly: + eventProvider = provider.NewENEventProvider(log, execNodeIdentitiesProvider, connFactory, nodeCommunicator) + + case query_mode.IndexQueryModeFailover: + local := provider.NewLocalEventProvider(eventsIndex) + execNode := provider.NewENEventProvider(log, execNodeIdentitiesProvider, connFactory, nodeCommunicator) + eventProvider = provider.NewFailoverEventProvider(log, local, execNode) + + default: + return nil, fmt.Errorf("unknown execution mode: %v", queryMode) + } + + return &Events{ + state: state, + chain: chain, + maxHeightRange: maxHeightRange, + headers: headers, + provider: eventProvider, + }, nil +} + +// GetEventsForHeightRange retrieves events for all sealed blocks between the start block height and +// the end block height (inclusive) that have the given type. +func (e *Events) GetEventsForHeightRange( + ctx context.Context, + eventType string, + startHeight, endHeight uint64, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if _, err := events.ValidateEvent(flow.EventType(eventType), e.chain); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid event type: %v", err) + } + + if endHeight < startHeight { + return nil, status.Error(codes.InvalidArgument, "start height must not be larger than end height") + } + + rangeSize := endHeight - startHeight + 1 // range is inclusive on both ends + if rangeSize > uint64(e.maxHeightRange) { + return nil, status.Errorf(codes.InvalidArgument, + "requested block range (%d) exceeded maximum (%d)", rangeSize, e.maxHeightRange) + } + + // get the latest sealed block header + sealed, err := e.state.Sealed().Head() + if err != nil { + // sealed block must be in the store, so throw an exception for any error + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + // start height should not be beyond the last sealed height + if startHeight > sealed.Height { + return nil, status.Errorf(codes.OutOfRange, + "start height %d is greater than the last sealed block height %d", startHeight, sealed.Height) + } + + // limit max height to last sealed block in the chain + // + // Note: this causes unintuitive behavior for clients making requests through a proxy that + // fronts multiple nodes. With that setup, clients may receive responses for a smaller range + // than requested because the node serving the request has a slightly delayed view of the chain. + // + // An alternative option is to return an error here, but that's likely to cause more pain for + // these clients since the requests would intermittently fail. 
it's recommended instead to + // check the block height of the last message in the response. this will be the last block + // height searched, and can be used to determine the start height for the next range. + if endHeight > sealed.Height { + endHeight = sealed.Height + } + + // find the block headers for all the blocks between min and max height (inclusive) + blockHeaders := make([]provider.BlockMetadata, 0, endHeight-startHeight+1) + + for i := startHeight; i <= endHeight; i++ { + // this looks inefficient, but is actually what's done under the covers by `headers.ByHeight` + // and avoids calculating header.ID() for each block. + blockID, err := e.headers.BlockIDByHeight(i) + if err != nil { + return nil, rpc.ConvertStorageError(common.ResolveHeightError(e.state.Params(), i, err)) + } + header, err := e.headers.ByBlockID(blockID) + if err != nil { + return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %d: %w", i, err)) + } + + blockHeaders = append(blockHeaders, provider.BlockMetadata{ + ID: blockID, + Height: header.Height, + Timestamp: header.Timestamp, + }) + } + + resp, err := e.provider.Events(ctx, blockHeaders, flow.EventType(eventType), requiredEventEncodingVersion) + if err != nil { + return nil, err + } + + return resp.Events, nil +} + +// GetEventsForBlockIDs retrieves events for all the specified block IDs that have the given type +func (e *Events) GetEventsForBlockIDs( + ctx context.Context, + eventType string, + blockIDs []flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if _, err := events.ValidateEvent(flow.EventType(eventType), e.chain); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid event type: %v", err) + } + + if uint(len(blockIDs)) > e.maxHeightRange { + return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", len(blockIDs), e.maxHeightRange) + } + + // find the block headers for all the block IDs + blockHeaders := make([]provider.BlockMetadata, 0, len(blockIDs)) + for _, blockID := range blockIDs { + header, err := e.headers.ByBlockID(blockID) + if err != nil { + return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %s: %w", blockID, err)) + } + + blockHeaders = append(blockHeaders, provider.BlockMetadata{ + ID: blockID, + Height: header.Height, + Timestamp: header.Timestamp, + }) + } + + resp, err := e.provider.Events(ctx, blockHeaders, flow.EventType(eventType), requiredEventEncodingVersion) + if err != nil { + return nil, err + } + + return resp.Events, nil +} diff --git a/engine/access/rpc/backend/backend_events_test.go b/engine/access/rpc/backend/events/events_test.go similarity index 84% rename from engine/access/rpc/backend/backend_events_test.go rename to engine/access/rpc/backend/events/events_test.go index 11b2d52ea99..b8718cd4f2e 100644 --- a/engine/access/rpc/backend/backend_events_test.go +++ b/engine/access/rpc/backend/events/events_test.go @@ -1,4 +1,4 @@ -package backend +package events import ( "bytes" @@ -9,6 +9,7 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -20,6 +21,8 @@ import ( "github.com/onflow/flow-go/engine/access/index" access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + 
"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ -38,10 +41,10 @@ var targetEvent string type testCase struct { encoding entities.EventEncodingVersion - queryMode IndexQueryMode + queryMode query_mode.IndexQueryMode } -type BackendEventsSuite struct { +type EventsSuite struct { suite.Suite log zerolog.Logger @@ -69,10 +72,10 @@ type BackendEventsSuite struct { } func TestBackendEventsSuite(t *testing.T) { - suite.Run(t, new(BackendEventsSuite)) + suite.Run(t, new(EventsSuite)) } -func (s *BackendEventsSuite) SetupTest() { +func (s *EventsSuite) SetupTest() { s.log = unittest.Logger() s.state = protocol.NewState(s.T()) s.snapshot = protocol.NewSnapshot(s.T()) @@ -162,10 +165,10 @@ func (s *BackendEventsSuite) SetupTest() { entities.EventEncodingVersion_CCF_V0, entities.EventEncodingVersion_JSON_CDC_V0, } { - for _, queryMode := range []IndexQueryMode{ - IndexQueryModeExecutionNodesOnly, - IndexQueryModeLocalOnly, - IndexQueryModeFailover, + for _, queryMode := range []query_mode.IndexQueryMode{ + query_mode.IndexQueryModeExecutionNodesOnly, + query_mode.IndexQueryModeLocalOnly, + query_mode.IndexQueryModeFailover, } { s.testCases = append(s.testCases, testCase{ encoding: encoding, @@ -175,99 +178,9 @@ func (s *BackendEventsSuite) SetupTest() { } } -func (s *BackendEventsSuite) defaultBackend() *backendEvents { - return &backendEvents{ - log: s.log, - chain: s.chainID.Chain(), - state: s.state, - headers: s.headers, - connFactory: s.connectionFactory, - nodeCommunicator: NewNodeCommunicator(false), - maxHeightRange: DefaultMaxHeightRange, - queryMode: IndexQueryModeExecutionNodesOnly, - eventsIndex: s.eventsIndex, - execNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( - s.log, - s.state, - s.receipts, - flow.IdentifierList{}, - flow.IdentifierList{}, - ), - } -} - -// setupExecutionNodes sets up the mocks required to test against an EN backend -func (s *BackendEventsSuite) setupExecutionNodes(block *flow.Block) { - s.params.On("FinalizedRoot").Return(s.rootHeader, nil) - s.state.On("Params").Return(s.params) - s.state.On("Final").Return(s.snapshot) - s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) - - // this line causes a S1021 lint error because receipts is explicitly declared. this is required - // to ensure the mock library handles the response type correctly - var receipts flow.ExecutionReceiptList //nolint:gosimple - receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) - s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) - - s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). 
- Return(s.execClient, &mocks.MockCloser{}, nil) -} - -// setupENSuccessResponse configures the execution node client to return a successful response -func (s *BackendEventsSuite) setupENSuccessResponse(eventType string, blocks []*flow.Block) { - s.setupExecutionNodes(blocks[len(blocks)-1]) - - ids := make([][]byte, len(blocks)) - results := make([]*execproto.GetEventsForBlockIDsResponse_Result, len(blocks)) - - events := make([]*entities.Event, 0) - for _, event := range s.blockEvents { - if string(event.Type) == eventType { - events = append(events, convert.EventToMessage(event)) - } - } - - for i, block := range blocks { - id := block.ID() - ids[i] = id[:] - results[i] = &execproto.GetEventsForBlockIDsResponse_Result{ - BlockId: id[:], - BlockHeight: block.Header.Height, - Events: events, - } - } - expectedExecRequest := &execproto.GetEventsForBlockIDsRequest{ - Type: eventType, - BlockIds: ids, - } - expectedResponse := &execproto.GetEventsForBlockIDsResponse{ - Results: results, - EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, - } - - s.execClient.On("GetEventsForBlockIDs", mock.Anything, expectedExecRequest). - Return(expectedResponse, nil) -} - -// setupENFailingResponse configures the execution node client to return an error -func (s *BackendEventsSuite) setupENFailingResponse(eventType string, headers []*flow.Header, err error) { - ids := make([][]byte, len(headers)) - for i, header := range headers { - id := header.ID() - ids[i] = id[:] - } - failingRequest := &execproto.GetEventsForBlockIDsRequest{ - Type: eventType, - BlockIds: ids, - } - - s.execClient.On("GetEventsForBlockIDs", mock.Anything, failingRequest). - Return(nil, err) -} - // TestGetEvents_HappyPaths tests the happy paths for GetEventsForBlockIDs and GetEventsForHeightRange // across all queryModes and encodings -func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { +func (s *EventsSuite) TestGetEvents_HappyPaths() { ctx := context.Background() startHeight := s.blocks[0].Header.Height @@ -283,8 +196,7 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { s.snapshot.On("Head").Return(s.sealedHead, nil) s.Run("GetEventsForHeightRange - end height updated", func() { - backend := s.defaultBackend() - backend.queryMode = IndexQueryModeFailover + backend := s.defaultBackend(query_mode.IndexQueryModeFailover, s.eventsIndex) endHeight := startHeight + 20 // should still return 5 responses encoding := entities.EventEncodingVersion_CCF_V0 @@ -297,15 +209,14 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { for _, tt := range s.testCases { s.Run(fmt.Sprintf("all from storage - %s - %s", tt.encoding.String(), tt.queryMode), func() { switch tt.queryMode { - case IndexQueryModeExecutionNodesOnly: + case query_mode.IndexQueryModeExecutionNodesOnly: // not applicable return - case IndexQueryModeLocalOnly, IndexQueryModeFailover: + case query_mode.IndexQueryModeLocalOnly, query_mode.IndexQueryModeFailover: // only calls to local storage } - backend := s.defaultBackend() - backend.queryMode = tt.queryMode + backend := s.defaultBackend(tt.queryMode, s.eventsIndex) response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) s.Require().NoError(err) @@ -321,20 +232,17 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { eventsIndex := index.NewEventsIndex(index.NewReporter(), events) switch tt.queryMode { - case IndexQueryModeLocalOnly: + case query_mode.IndexQueryModeLocalOnly: // not applicable return - case IndexQueryModeExecutionNodesOnly: + case 
query_mode.IndexQueryModeExecutionNodesOnly: // only calls to EN, no calls to storage - case IndexQueryModeFailover: + case query_mode.IndexQueryModeFailover: // all calls to storage fail // simulated by not initializing the eventIndex so all calls return ErrIndexNotInitialized } - backend := s.defaultBackend() - backend.queryMode = tt.queryMode - backend.eventsIndex = eventsIndex - + backend := s.defaultBackend(tt.queryMode, eventsIndex) s.setupENSuccessResponse(targetEvent, s.blocks) response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) @@ -351,10 +259,10 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { eventsIndex := index.NewEventsIndex(index.NewReporter(), events) switch tt.queryMode { - case IndexQueryModeLocalOnly, IndexQueryModeExecutionNodesOnly: + case query_mode.IndexQueryModeLocalOnly, query_mode.IndexQueryModeExecutionNodesOnly: // not applicable return - case IndexQueryModeFailover: + case query_mode.IndexQueryModeFailover: // only failing blocks queried from EN s.setupENSuccessResponse(targetEvent, []*flow.Block{s.blocks[0], s.blocks[4]}) } @@ -371,10 +279,7 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { err := eventsIndex.Initialize(reporter) s.Require().NoError(err) - backend := s.defaultBackend() - backend.queryMode = tt.queryMode - backend.eventsIndex = eventsIndex - + backend := s.defaultBackend(tt.queryMode, eventsIndex) response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) s.Require().NoError(err) s.assertResponse(response, tt.encoding) @@ -386,7 +291,7 @@ func (s *BackendEventsSuite) TestGetEvents_HappyPaths() { } } -func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { +func (s *EventsSuite) TestGetEventsForHeightRange_HandlesErrors() { ctx := context.Background() startHeight := s.blocks[0].Header.Height @@ -394,7 +299,7 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { encoding := entities.EventEncodingVersion_CCF_V0 s.Run("returns error for endHeight < startHeight", func() { - backend := s.defaultBackend() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) endHeight := startHeight - 1 response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) @@ -403,7 +308,7 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { }) s.Run("returns error for range larger than max", func() { - backend := s.defaultBackend() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) endHeight := startHeight + DefaultMaxHeightRange response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) @@ -419,8 +324,7 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { signalerCtx := irrecoverable.WithSignalerContext(context.Background(), irrecoverable.NewMockSignalerContextExpectError(s.T(), ctx, signCtxErr)) - backend := s.defaultBackend() - + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) response, err := backend.GetEventsForHeightRange(signalerCtx, targetEvent, startHeight, endHeight, encoding) // these will never be returned in production s.Assert().Equal(codes.Unknown, status.Code(err)) @@ -431,10 +335,10 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { s.snapshot.On("Head").Return(s.sealedHead, nil) s.Run("returns error for startHeight > sealed height", func() { - backend := 
s.defaultBackend() startHeight := s.sealedHead.Height + 1 endHeight := startHeight + 1 + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) s.Assert().Equal(codes.OutOfRange, status.Code(err)) s.Assert().Nil(response) @@ -443,14 +347,13 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { s.state.On("Params").Return(s.params) s.Run("returns error for startHeight < spork root height", func() { - backend := s.defaultBackend() - sporkRootHeight := s.blocks[0].Header.Height - 10 startHeight := sporkRootHeight - 1 s.params.On("SporkRootBlockHeight").Return(sporkRootHeight).Once() s.params.On("SealedRoot").Return(s.rootHeader, nil).Once() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) s.Assert().Equal(codes.NotFound, status.Code(err)) s.Assert().ErrorContains(err, "Try to use a historic node") @@ -458,7 +361,7 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { }) s.Run("returns error for startHeight < node root height", func() { - backend := s.defaultBackend() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) sporkRootHeight := s.blocks[0].Header.Height - 10 nodeRootHeader := unittest.BlockHeaderWithHeight(s.blocks[0].Header.Height) @@ -474,13 +377,13 @@ func (s *BackendEventsSuite) TestGetEventsForHeightRange_HandlesErrors() { }) } -func (s *BackendEventsSuite) TestGetEventsForBlockIDs_HandlesErrors() { +func (s *EventsSuite) TestGetEventsForBlockIDs_HandlesErrors() { ctx := context.Background() encoding := entities.EventEncodingVersion_CCF_V0 s.Run("returns error when too many blockIDs requested", func() { - backend := s.defaultBackend() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) backend.maxHeightRange = 3 response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, encoding) @@ -490,7 +393,7 @@ func (s *BackendEventsSuite) TestGetEventsForBlockIDs_HandlesErrors() { s.Run("returns error for missing header", func() { headers := storagemock.NewHeaders(s.T()) - backend := s.defaultBackend() + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) backend.headers = headers for i, blockID := range s.blockIDs { @@ -509,7 +412,7 @@ func (s *BackendEventsSuite) TestGetEventsForBlockIDs_HandlesErrors() { }) } -func (s *BackendEventsSuite) assertResponse(response []flow.BlockEvents, encoding entities.EventEncodingVersion) { +func (s *EventsSuite) assertResponse(response []flow.BlockEvents, encoding entities.EventEncodingVersion) { s.Assert().Len(response, len(s.blocks)) for i, block := range s.blocks { s.Assert().Equal(block.Header.Height, response[i].BlockHeight) @@ -520,7 +423,7 @@ func (s *BackendEventsSuite) assertResponse(response []flow.BlockEvents, encodin } } -func (s *BackendEventsSuite) assertEncoding(event *flow.Event, encoding entities.EventEncodingVersion) { +func (s *EventsSuite) assertEncoding(event *flow.Event, encoding entities.EventEncodingVersion) { var err error switch encoding { case entities.EventEncodingVersion_CCF_V0: @@ -532,3 +435,96 @@ func (s *BackendEventsSuite) assertEncoding(event *flow.Event, encoding entities } s.Require().NoError(err) } + +func (s *EventsSuite) defaultBackend(mode query_mode.IndexQueryMode, 
eventsIndex *index.EventsIndex) *Events { + e, err := NewEventsBackend( + s.log, + s.state, + s.chainID.Chain(), + DefaultMaxHeightRange, + s.headers, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + mode, + eventsIndex, + commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + flow.IdentifierList{}, + flow.IdentifierList{}, + )) + + require.NoError(s.T(), err) + + return e +} + +// setupExecutionNodes sets up the mocks required to test against an EN backend +func (s *EventsSuite) setupExecutionNodes(block *flow.Block) { + s.params.On("FinalizedRoot").Return(s.rootHeader, nil) + s.state.On("Params").Return(s.params) + s.state.On("Final").Return(s.snapshot) + s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) + + // this line causes a S1021 lint error because receipts is explicitly declared. this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + + s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil) +} + +// setupENSuccessResponse configures the execution node client to return a successful response +func (s *EventsSuite) setupENSuccessResponse(eventType string, blocks []*flow.Block) { + s.setupExecutionNodes(blocks[len(blocks)-1]) + + ids := make([][]byte, len(blocks)) + results := make([]*execproto.GetEventsForBlockIDsResponse_Result, len(blocks)) + + events := make([]*entities.Event, 0) + for _, event := range s.blockEvents { + if string(event.Type) == eventType { + events = append(events, convert.EventToMessage(event)) + } + } + + for i, block := range blocks { + id := block.ID() + ids[i] = id[:] + results[i] = &execproto.GetEventsForBlockIDsResponse_Result{ + BlockId: id[:], + BlockHeight: block.Header.Height, + Events: events, + } + } + expectedExecRequest := &execproto.GetEventsForBlockIDsRequest{ + Type: eventType, + BlockIds: ids, + } + expectedResponse := &execproto.GetEventsForBlockIDsResponse{ + Results: results, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + } + + s.execClient.On("GetEventsForBlockIDs", mock.Anything, expectedExecRequest). + Return(expectedResponse, nil) +} + +// setupENFailingResponse configures the execution node client to return an error +func (s *EventsSuite) setupENFailingResponse(eventType string, headers []*flow.Header, err error) { + ids := make([][]byte, len(headers)) + for i, header := range headers { + id := header.ID() + ids[i] = id[:] + } + failingRequest := &execproto.GetEventsForBlockIDsRequest{ + Type: eventType, + BlockIds: ids, + } + + s.execClient.On("GetEventsForBlockIDs", mock.Anything, failingRequest). 
+ Return(nil, err) +} diff --git a/engine/access/rpc/backend/events/provider/execution_node.go b/engine/access/rpc/backend/events/provider/execution_node.go new file mode 100644 index 00000000000..cb224c8a2db --- /dev/null +++ b/engine/access/rpc/backend/events/provider/execution_node.go @@ -0,0 +1,207 @@ +package provider + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" +) + +type ENEventProvider struct { + log zerolog.Logger + nodeProvider *rpc.ExecutionNodeIdentitiesProvider + connFactory connection.ConnectionFactory + nodeCommunicator node_communicator.Communicator +} + +var _ EventProvider = (*ENEventProvider)(nil) + +func NewENEventProvider( + log zerolog.Logger, + nodeProvider *rpc.ExecutionNodeIdentitiesProvider, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, +) *ENEventProvider { + return &ENEventProvider{ + log: log.With().Str("event_provider", "execution_node").Logger(), + nodeProvider: nodeProvider, + connFactory: connFactory, + nodeCommunicator: nodeCommunicator, + } +} + +func (e *ENEventProvider) Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + encoding entities.EventEncodingVersion, +) (Response, error) { + if len(blocks) == 0 { + return Response{}, nil + } + + // create an execution API request for events at block ID + blockIDs := make([]flow.Identifier, len(blocks)) + for i := range blocks { + blockIDs[i] = blocks[i].ID + } + + req := &execproto.GetEventsForBlockIDsRequest{ + Type: string(eventType), + BlockIds: convert.IdentifiersToMessages(blockIDs), + } + + // choose the last block ID to find the list of execution nodes + lastBlockID := blockIDs[len(blockIDs)-1] + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + lastBlockID, + ) + if err != nil { + return Response{}, rpc.ConvertError(err, "failed to get execution nodes for events query", codes.Internal) + } + + var resp *execproto.GetEventsForBlockIDsResponse + var successfulNode *flow.IdentitySkeleton + resp, successfulNode, err = e.getEventsFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return Response{}, rpc.ConvertError(err, "failed to get execution nodes for events query", codes.Internal) + } + e.log.Trace(). + Str("execution_id", successfulNode.String()). + Str("last_block_id", lastBlockID.String()). + Msg("successfully got events") + + // convert execution node api result to access node api result + results, err := verifyAndConvertToAccessEvents( + resp.GetResults(), + blocks, + resp.GetEventEncodingVersion(), + encoding, + ) + if err != nil { + return Response{}, status.Errorf(codes.Internal, "failed to verify retrieved events from execution node: %v", err) + } + + return Response{ + Events: results, + }, nil +} + +// getEventsFromAnyExeNode retrieves the given events from any EN in `execNodes`. +// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from +// other ENs are logged and swallowed. 
If all ENs fail to return a valid response, then an +// error aggregating all failures is returned. +func (e *ENEventProvider) getEventsFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, *flow.IdentitySkeleton, error) { + var resp *execproto.GetEventsForBlockIDsResponse + var execNode *flow.IdentitySkeleton + errToReturn := e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + start := time.Now() + resp, err = e.tryGetEvents(ctx, node, req) + duration := time.Since(start) + + logger := e.log.With(). + Str("execution_node", node.String()). + Str("event", req.GetType()). + Int("blocks", len(req.BlockIds)). + Int64("rtt_ms", duration.Milliseconds()). + Logger() + + if err == nil { + // return if any execution node replied successfully + logger.Debug().Msg("Successfully got events") + execNode = node + return nil + } + + logger.Err(err).Msg("failed to execute Events") + return err + }, + nil, + ) + + return resp, execNode, errToReturn +} + +func (e *ENEventProvider) tryGetEvents( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + return execRPCClient.GetEventsForBlockIDs(ctx, req) +} + +// verifyAndConvertToAccessEvents converts execution node api result to access node api result, +// and verifies that the results contains results from each block that was requested +func verifyAndConvertToAccessEvents( + execEvents []*execproto.GetEventsForBlockIDsResponse_Result, + requestedBlockInfos []BlockMetadata, + from entities.EventEncodingVersion, + to entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if len(execEvents) != len(requestedBlockInfos) { + return nil, errors.New("number of results does not match number of blocks requested") + } + + requestedBlockInfoSet := map[string]BlockMetadata{} + for _, header := range requestedBlockInfos { + requestedBlockInfoSet[header.ID.String()] = header + } + + results := make([]flow.BlockEvents, len(execEvents)) + + for i, result := range execEvents { + blockInfo, expected := requestedBlockInfoSet[hex.EncodeToString(result.GetBlockId())] + if !expected { + return nil, fmt.Errorf("unexpected blockID from exe node %x", result.GetBlockId()) + } + if result.GetBlockHeight() != blockInfo.Height { + return nil, fmt.Errorf("unexpected block height %d for block %x from exe node", + result.GetBlockHeight(), + result.GetBlockId()) + } + + events, err := convert.MessagesToEventsWithEncodingConversion(result.GetEvents(), from, to) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal events in event %d with encoding version %s: %w", + i, to.String(), err) + } + + results[i] = flow.BlockEvents{ + BlockID: blockInfo.ID, + BlockHeight: blockInfo.Height, + BlockTimestamp: blockInfo.Timestamp, + Events: events, + } + } + + return results, nil +} diff --git a/engine/access/rpc/backend/events/provider/failover.go b/engine/access/rpc/backend/events/provider/failover.go new file mode 100644 index 00000000000..82792a19395 --- /dev/null +++ b/engine/access/rpc/backend/events/provider/failover.go @@ -0,0 +1,75 @@ +package provider + +import ( + "context" + "sort" + + "github.com/rs/zerolog" + + 
"github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +type FailoverEventProvider struct { + log zerolog.Logger + localProvider EventProvider + execNodeProvider EventProvider +} + +var _ EventProvider = (*FailoverEventProvider)(nil) + +func NewFailoverEventProvider( + log zerolog.Logger, + localProvider EventProvider, + execNodeProvider EventProvider, +) *FailoverEventProvider { + return &FailoverEventProvider{ + log: log.With().Str("event_provider", "failover").Logger(), + localProvider: localProvider, + execNodeProvider: execNodeProvider, + } +} + +func (f *FailoverEventProvider) Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + encoding entities.EventEncodingVersion, +) (Response, error) { + localEvents, localErr := f.localProvider.Events(ctx, blocks, eventType, encoding) + if localErr != nil { + f.log.Debug().Err(localErr). + Msg("failed to get events from local storage. will try to get them from execution node") + + localEvents.MissingBlocks = blocks + } + + if len(localEvents.MissingBlocks) == 0 { + return localEvents, nil + } + + f.log.Debug(). + Int("missing_blocks", len(localEvents.MissingBlocks)). + Msg("querying execution nodes for events from missing blocks") + + execNodeEvents, execNodeErr := f.execNodeProvider.Events(ctx, localEvents.MissingBlocks, eventType, encoding) + if execNodeErr != nil { + return Response{}, execNodeErr + } + + // sort ascending by block height + // this is needed because some blocks may be retrieved from storage and others from execution nodes. + // most likely, the earlier blocks will all be found in local storage, but that's not guaranteed, + // especially for nodes started after a spork, or once pruning is enabled. + // Note: this may not match the order of the original request for clients using GetEventsForBlockIDs + // that provide out of order block IDs + combinedEvents := append(localEvents.Events, execNodeEvents.Events...) 
+ sort.Slice(combinedEvents, func(i, j int) bool { + return combinedEvents[i].BlockHeight < combinedEvents[j].BlockHeight + }) + + return Response{ + Events: combinedEvents, + }, nil +} diff --git a/engine/access/rpc/backend/events/provider/local.go b/engine/access/rpc/backend/events/provider/local.go new file mode 100644 index 00000000000..d4be3efb64e --- /dev/null +++ b/engine/access/rpc/backend/events/provider/local.go @@ -0,0 +1,89 @@ +package provider + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/codes" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/storage" +) + +type LocalEventProvider struct { + index *index.EventsIndex +} + +var _ EventProvider = (*LocalEventProvider)(nil) + +func NewLocalEventProvider(index *index.EventsIndex) *LocalEventProvider { + return &LocalEventProvider{ + index: index, + } +} + +func (l *LocalEventProvider) Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + encoding entities.EventEncodingVersion, +) (Response, error) { + missing := make([]BlockMetadata, 0) + resp := make([]flow.BlockEvents, 0) + + for _, blockInfo := range blocks { + if ctx.Err() != nil { + return Response{}, rpc.ConvertError(ctx.Err(), "failed to get events from storage", codes.Canceled) + } + + events, err := l.index.ByBlockID(blockInfo.ID, blockInfo.Height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) || + errors.Is(err, storage.ErrHeightNotIndexed) || + errors.Is(err, indexer.ErrIndexNotInitialized) { + missing = append(missing, blockInfo) + continue + } + err = fmt.Errorf("failed to get events for block %s: %w", blockInfo.ID, err) + return Response{}, rpc.ConvertError(err, "failed to get events from storage", codes.Internal) + } + + filteredEvents := make([]flow.Event, 0) + for _, event := range events { + if event.Type != eventType { + continue + } + + // events are encoded in CCF format in storage. convert to JSON-CDC if requested + if encoding == entities.EventEncodingVersion_JSON_CDC_V0 { + payload, err := convert.CcfPayloadToJsonPayload(event.Payload) + if err != nil { + err = fmt.Errorf("failed to convert event payload for block %s: %w", blockInfo.ID, err) + return Response{}, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + event.Payload = payload + } + + filteredEvents = append(filteredEvents, event) + } + + resp = append(resp, flow.BlockEvents{ + BlockID: blockInfo.ID, + BlockHeight: blockInfo.Height, + BlockTimestamp: blockInfo.Timestamp, + Events: filteredEvents, + }) + } + + return Response{ + Events: resp, + MissingBlocks: missing, + }, nil +} diff --git a/engine/access/rpc/backend/events/provider/mock/event_provider.go b/engine/access/rpc/backend/events/provider/mock/event_provider.go new file mode 100644 index 00000000000..7c4fbd5f15a --- /dev/null +++ b/engine/access/rpc/backend/events/provider/mock/event_provider.go @@ -0,0 +1,61 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + mock "github.com/stretchr/testify/mock" + + provider "github.com/onflow/flow-go/engine/access/rpc/backend/events/provider" +) + +// EventProvider is an autogenerated mock type for the EventProvider type +type EventProvider struct { + mock.Mock +} + +// Events provides a mock function with given fields: ctx, blocks, eventType, requiredEventEncodingVersion +func (_m *EventProvider) Events(ctx context.Context, blocks []provider.BlockMetadata, eventType flow.EventType, requiredEventEncodingVersion entities.EventEncodingVersion) (provider.Response, error) { + ret := _m.Called(ctx, blocks, eventType, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for Events") + } + + var r0 provider.Response + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) (provider.Response, error)); ok { + return rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) provider.Response); ok { + r0 = rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } else { + r0 = ret.Get(0).(provider.Response) + } + + if rf, ok := ret.Get(1).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEventProvider creates a new instance of EventProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *EventProvider { + mock := &EventProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/events/provider/provider.go b/engine/access/rpc/backend/events/provider/provider.go new file mode 100644 index 00000000000..e7b83d4b849 --- /dev/null +++ b/engine/access/rpc/backend/events/provider/provider.go @@ -0,0 +1,32 @@ +package provider + +import ( + "context" + "time" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +type EventProvider interface { + Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) (Response, error) +} + +// BlockMetadata is used to capture information about requested blocks to avoid repeated blockID +// calculations and passing around full block headers. 
+type BlockMetadata struct { + ID flow.Identifier + Height uint64 + Timestamp time.Time +} + +type Response struct { + Events []flow.BlockEvents + MissingBlocks []BlockMetadata +} diff --git a/engine/access/rpc/backend/historical_access_test.go b/engine/access/rpc/backend/historical_access_test.go index a776d8337e8..4c6a076bde0 100644 --- a/engine/access/rpc/backend/historical_access_test.go +++ b/engine/access/rpc/backend/historical_access_test.go @@ -15,7 +15,6 @@ import ( // TestHistoricalTransactionResult tests to see if the historical transaction status can be retrieved func (suite *Suite) TestHistoricalTransactionResult() { - ctx := context.Background() collection := unittest.CollectionFixture(1) transactionBody := collection.Transactions[0] @@ -55,7 +54,8 @@ func (suite *Suite) TestHistoricalTransactionResult() { flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0, ) - suite.checkResponse(result, err) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be sealed suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) @@ -98,7 +98,8 @@ func (suite *Suite) TestHistoricalTransaction() { // Make the call for the transaction result tx, err := backend.GetTransaction(ctx, txID) - suite.checkResponse(tx, err) + suite.Require().NoError(err) + suite.Require().NotNil(tx) suite.assertAllExpectations() } diff --git a/engine/access/rpc/backend/node_communicator.go b/engine/access/rpc/backend/node_communicator/communicator.go similarity index 99% rename from engine/access/rpc/backend/node_communicator.go rename to engine/access/rpc/backend/node_communicator/communicator.go index 34b75dddab0..626841c3d38 100644 --- a/engine/access/rpc/backend/node_communicator.go +++ b/engine/access/rpc/backend/node_communicator/communicator.go @@ -1,4 +1,4 @@ -package backend +package node_communicator import ( "github.com/hashicorp/go-multierror" diff --git a/engine/access/rpc/backend/mock/communicator.go b/engine/access/rpc/backend/node_communicator/mock/communicator.go similarity index 100% rename from engine/access/rpc/backend/mock/communicator.go rename to engine/access/rpc/backend/node_communicator/mock/communicator.go diff --git a/engine/access/rpc/backend/node_selector.go b/engine/access/rpc/backend/node_communicator/selector.go similarity index 99% rename from engine/access/rpc/backend/node_selector.go rename to engine/access/rpc/backend/node_communicator/selector.go index 4aab3a89ca5..e5bd9f6e4b3 100644 --- a/engine/access/rpc/backend/node_selector.go +++ b/engine/access/rpc/backend/node_communicator/selector.go @@ -1,4 +1,4 @@ -package backend +package node_communicator import ( "fmt" diff --git a/engine/access/rpc/backend/query_mode/mode.go b/engine/access/rpc/backend/query_mode/mode.go new file mode 100644 index 00000000000..b9678674ed2 --- /dev/null +++ b/engine/access/rpc/backend/query_mode/mode.go @@ -0,0 +1,55 @@ +package query_mode + +import ( + "errors" +) + +type IndexQueryMode int + +const ( + // IndexQueryModeLocalOnly executes scripts and gets accounts using only local storage + IndexQueryModeLocalOnly IndexQueryMode = iota + 1 + + // IndexQueryModeExecutionNodesOnly executes scripts and gets accounts using only + // execution nodes + IndexQueryModeExecutionNodesOnly + + // IndexQueryModeFailover executes scripts and gets accounts using local storage first, + // then falls back to execution nodes if data is not available for the height or if request + // failed due to a non-user error. 
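+ // For example, the events backend maps this mode to a failover provider that serves whatever the
+ // local index has and fetches only the missing blocks from execution nodes.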
+ IndexQueryModeFailover + + // IndexQueryModeCompare executes scripts and gets accounts using both local storage and + // execution nodes and compares the results. The execution node result is always returned. + IndexQueryModeCompare +) + +func ParseIndexQueryMode(s string) (IndexQueryMode, error) { + switch s { + case IndexQueryModeLocalOnly.String(): + return IndexQueryModeLocalOnly, nil + case IndexQueryModeExecutionNodesOnly.String(): + return IndexQueryModeExecutionNodesOnly, nil + case IndexQueryModeFailover.String(): + return IndexQueryModeFailover, nil + case IndexQueryModeCompare.String(): + return IndexQueryModeCompare, nil + default: + return 0, errors.New("invalid script execution mode") + } +} + +func (m IndexQueryMode) String() string { + switch m { + case IndexQueryModeLocalOnly: + return "local-only" + case IndexQueryModeExecutionNodesOnly: + return "execution-nodes-only" + case IndexQueryModeFailover: + return "failover" + case IndexQueryModeCompare: + return "compare" + default: + return "" + } +} diff --git a/engine/access/rpc/backend/retry.go b/engine/access/rpc/backend/retry.go deleted file mode 100644 index d5389e29b19..00000000000 --- a/engine/access/rpc/backend/retry.go +++ /dev/null @@ -1,148 +0,0 @@ -package backend - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state" - "github.com/onflow/flow-go/storage" -) - -// retryFrequency has to be less than TransactionExpiry or else this module does nothing -const retryFrequency uint64 = 120 // Blocks - -// Retry implements a simple retry mechanism for transaction submission. -type Retry struct { - mu sync.RWMutex - // pending Transactions - transactionByReferencBlockHeight map[uint64]map[flow.Identifier]*flow.TransactionBody - backend *Backend - active bool - log zerolog.Logger // default logger -} - -func newRetry(log zerolog.Logger) *Retry { - return &Retry{ - log: log, - transactionByReferencBlockHeight: map[uint64]map[flow.Identifier]*flow.TransactionBody{}, - } -} - -func (r *Retry) Activate() *Retry { - r.active = true - return r -} - -func (r *Retry) IsActive() bool { - return r.active -} - -func (r *Retry) SetBackend(b *Backend) *Retry { - r.backend = b - return r -} - -// Retry attempts to resend transactions for a specified block height. -// It performs cleanup operations, including pruning old transactions, and retries sending -// transactions that are still pending. -// The method takes a block height as input. If the provided height is lower than -// flow.DefaultTransactionExpiry, no retries are performed, and the method returns nil. -// No errors expected during normal operations. 
-func (r *Retry) Retry(height uint64) error { - // No need to retry if height is lower than DefaultTransactionExpiry - if height < flow.DefaultTransactionExpiry { - return nil - } - - // naive cleanup for now, prune every 120 Blocks - if height%retryFrequency == 0 { - r.prune(height) - } - - heightToRetry := height - flow.DefaultTransactionExpiry + retryFrequency - - for heightToRetry < height { - err := r.retryTxsAtHeight(heightToRetry) - if err != nil { - return err - } - heightToRetry = heightToRetry + retryFrequency - } - return nil -} - -// RegisterTransaction adds a transaction that could possibly be retried -func (r *Retry) RegisterTransaction(height uint64, tx *flow.TransactionBody) { - r.mu.Lock() - defer r.mu.Unlock() - if r.transactionByReferencBlockHeight[height] == nil { - r.transactionByReferencBlockHeight[height] = make(map[flow.Identifier]*flow.TransactionBody) - } - r.transactionByReferencBlockHeight[height][tx.ID()] = tx -} - -func (r *Retry) prune(height uint64) { - r.mu.Lock() - defer r.mu.Unlock() - // If height is less than the default, there will be no expired Transactions - if height < flow.DefaultTransactionExpiry { - return - } - for h := range r.transactionByReferencBlockHeight { - if h < height-flow.DefaultTransactionExpiry { - delete(r.transactionByReferencBlockHeight, h) - } - } -} - -// retryTxsAtHeight retries transactions at a specific block height. -// It looks up transactions at the specified height and retries sending -// raw transactions for those that are still pending. It also cleans up -// transactions that are no longer pending or have an unknown status. -// Error returns: -// - errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal). -func (r *Retry) retryTxsAtHeight(heightToRetry uint64) error { - r.mu.Lock() - defer r.mu.Unlock() - txsAtHeight := r.transactionByReferencBlockHeight[heightToRetry] - for txID, tx := range txsAtHeight { - // find the block for the transaction - block, err := r.backend.lookupBlock(txID) - if err != nil { - if !errors.Is(err, storage.ErrNotFound) { - return err - } - block = nil - } - - // find the transaction status - var status flow.TransactionStatus - if block == nil { - status, err = r.backend.DeriveUnknownTransactionStatus(tx.ReferenceBlockID) - } else { - status, err = r.backend.DeriveTransactionStatus(block.Header.Height, false) - } - - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - return err - } - continue - } - if status == flow.TransactionStatusPending { - err = r.backend.SendRawTransaction(context.Background(), tx) - if err != nil { - r.log.Info().Str("retry", fmt.Sprintf("retryTxsAtHeight: %v", heightToRetry)).Err(err).Msg("failed to send raw transactions") - } - } else if status != flow.TransactionStatusUnknown { - // not pending or unknown, don't need to retry anymore - delete(txsAtHeight, txID) - } - } - return nil -} diff --git a/engine/access/rpc/backend/retry_test.go b/engine/access/rpc/backend/retry_test.go deleted file mode 100644 index 544c9a9669b..00000000000 --- a/engine/access/rpc/backend/retry_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package backend - -import ( - "context" - - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/stretchr/testify/mock" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/model/flow" - protocol 
"github.com/onflow/flow-go/state/protocol/mock" - realstorage "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestTransactionRetry tests that the retry mechanism will send retries at specific times -func (suite *Suite) TestTransactionRetry() { - collection := unittest.CollectionFixture(1) - transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - // Height needs to be at least DefaultTransactionExpiry before we start doing retries - block.Header.Height = flow.DefaultTransactionExpiry + 1 - transactionBody.SetReferenceBlockID(block.ID()) - headBlock := unittest.BlockFixture() - headBlock.Header.Height = block.Header.Height - 1 // head is behind the current block - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - - suite.snapshot.On("Head").Return(headBlock.Header, nil) - snapshotAtBlock := new(protocol.Snapshot) - snapshotAtBlock.On("Head").Return(block.Header, nil) - suite.state.On("AtBlockID", block.ID()).Return(snapshotAtBlock, nil) - - // collection storage returns a not found error - suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(nil, realstorage.ErrNotFound) - - params := suite.defaultBackendParams() - - // Setup Handler + Retry - backend, err := New(params) - suite.Require().NoError(err) - - retry := newRetry(suite.log).SetBackend(backend).Activate() - backend.retry = retry - - retry.RegisterTransaction(block.Header.Height, transactionBody) - - suite.colClient.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil) - - // Don't retry on every height - err = retry.Retry(block.Header.Height + 1) - suite.Require().NoError(err) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Retry every `retryFrequency` - err = retry.Retry(block.Header.Height + retryFrequency) - suite.Require().NoError(err) - - suite.colClient.AssertNumberOfCalls(suite.T(), "SendTransaction", 1) - - // do not retry if expired - err = retry.Retry(block.Header.Height + retryFrequency + flow.DefaultTransactionExpiry) - suite.Require().NoError(err) - - // Should've still only been called once - suite.colClient.AssertNumberOfCalls(suite.T(), "SendTransaction", 1) - - suite.assertAllExpectations() -} - -// TestSuccessfulTransactionsDontRetry tests that the retry mechanism will send retries at specific times -func (suite *Suite) TestSuccessfulTransactionsDontRetry() { - ctx := context.Background() - collection := unittest.CollectionFixture(1) - transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - // Height needs to be at least DefaultTransactionExpiry before we start doing retries - block.Header.Height = flow.DefaultTransactionExpiry + 1 - refBlock := unittest.BlockFixture() - refBlock.Header.Height = 2 - transactionBody.SetReferenceBlockID(refBlock.ID()) - - block.SetPayload( - unittest.PayloadFixture( - unittest.WithGuarantees( - unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...))) - - light := collection.Light() - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - // transaction storage returns the corresponding transaction - suite.transactions.On("ByID", transactionBody.ID()).Return(transactionBody, nil) - // collection storage returns the corresponding collection - suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(&light, nil) - suite.collections.On("LightByID", light.ID()).Return(&light, nil) - // block storage 
returns the corresponding block - suite.blocks.On("ByCollectionID", collection.ID()).Return(&block, nil) - - txID := transactionBody.ID() - blockID := block.ID() - exeEventReq := execution.GetTransactionResultRequest{ - BlockId: blockID[:], - TransactionId: txID[:], - } - exeEventResp := execution.GetTransactionResultResponse{ - Events: nil, - } - - _, enIDs := suite.setupReceipts(&block) - suite.snapshot.On("Identities", mock.Anything).Return(enIDs, nil) - connFactory := suite.setupConnectionFactory() - - params := suite.defaultBackendParams() - params.ConnFactory = connFactory - - backend, err := New(params) - suite.Require().NoError(err) - - retry := newRetry(suite.log).SetBackend(backend).Activate() - backend.retry = retry - - retry.RegisterTransaction(block.Header.Height, transactionBody) - - suite.colClient.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil) - - // return not found to return finalized status - suite.execClient.On("GetTransactionResult", ctx, &exeEventReq). - Return(&exeEventResp, status.Errorf(codes.NotFound, "not found")). - Times(len(enIDs)) // should call each EN once - - // first call - when block under test is greater height than the sealed head, but execution node does not know about Tx - result, err := backend.GetTransactionResult( - ctx, - txID, - flow.ZeroID, - flow.ZeroID, - entities.EventEncodingVersion_JSON_CDC_V0, - ) - suite.checkResponse(result, err) - - // status should be finalized since the sealed Blocks is smaller in height - suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) - - // Don't retry now now that block is finalized - err = retry.Retry(block.Header.Height + 1) - suite.Require().NoError(err) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Don't retry now now that block is finalized - err = retry.Retry(block.Header.Height + retryFrequency) - suite.Require().NoError(err) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Don't retry now now that block is finalized - err = retry.Retry(block.Header.Height + retryFrequency + flow.DefaultTransactionExpiry) - suite.Require().NoError(err) - - // Should've still should not be called - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - suite.assertAllExpectations() -} diff --git a/engine/access/rpc/backend/script_executor_test.go b/engine/access/rpc/backend/script_executor_test.go index 2d207194725..4d381bfd68a 100644 --- a/engine/access/rpc/backend/script_executor_test.go +++ b/engine/access/rpc/backend/script_executor_test.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/execution" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" @@ -96,6 +97,7 @@ func (s *ScriptExecutorSuite) bootstrap() { // SetupTest sets up the test environment for each test in the suite. // This includes initializing various components and mock objects needed for the tests. 
func (s *ScriptExecutorSuite) SetupTest() { + lockManager := storage.NewTestingLockManager() s.log = unittest.Logger() s.chain = flow.Emulator.Chain() @@ -129,7 +131,7 @@ func (s *ScriptExecutorSuite) SetupTest() { indexerCore, err := indexer.New( s.log, - metrics.NewNoopCollector(), + module.ExecutionStateIndexerMetrics(metrics.NewNoopCollector()), nil, s.registerIndex, s.headers, @@ -139,7 +141,8 @@ func (s *ScriptExecutorSuite) SetupTest() { nil, s.chain, derivedChainData, - nil, + module.CollectionExecutedMetric(metrics.NewNoopCollector()), + lockManager, ) s.Require().NoError(err) diff --git a/engine/access/rpc/backend/scripts/executor/compare.go b/engine/access/rpc/backend/scripts/executor/compare.go new file mode 100644 index 00000000000..15f87ce1fdf --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/compare.go @@ -0,0 +1,62 @@ +package executor + +import ( + "context" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/module" +) + +type ComparingScriptExecutor struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics + + localExecutor ScriptExecutor + executionNodeExecutor ScriptExecutor + + scriptCache *LoggedScriptCache +} + +var _ ScriptExecutor = (*ComparingScriptExecutor)(nil) + +func NewComparingScriptExecutor( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + scriptCache *LoggedScriptCache, + localExecutor ScriptExecutor, + execNodeExecutor ScriptExecutor, +) *ComparingScriptExecutor { + return &ComparingScriptExecutor{ + log: zerolog.New(log).With().Str("script_executor", "comparing").Logger(), + metrics: metrics, + scriptCache: scriptCache, + localExecutor: localExecutor, + executionNodeExecutor: execNodeExecutor, + } +} + +func (c *ComparingScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) { + execResult, execDuration, execErr := c.executionNodeExecutor.Execute(ctx, request) + + // we can only compare the results if there were either no errors or a cadence error + // since we cannot distinguish the EN error as caused by the block being pruned or some other reason, + // which may produce a valid RN output but an error for the EN + isInvalidArgument := status.Code(execErr) == codes.InvalidArgument + if execErr != nil && !isInvalidArgument { + return nil, 0, execErr + } + + localResult, localDuration, localErr := c.localExecutor.Execute(ctx, request) + + resultComparer := newScriptResultComparison(c.log, c.metrics, c.scriptCache.shouldLogScript, request) + _ = resultComparer.compare( + newScriptResult(execResult, execDuration, execErr), + newScriptResult(localResult, localDuration, localErr), + ) + + return execResult, execDuration, execErr +} diff --git a/engine/access/rpc/backend/script_comparer.go b/engine/access/rpc/backend/scripts/executor/comparer.go similarity index 97% rename from engine/access/rpc/backend/script_comparer.go rename to engine/access/rpc/backend/scripts/executor/comparer.go index c71ce1fbd6c..46364ffcea0 100644 --- a/engine/access/rpc/backend/script_comparer.go +++ b/engine/access/rpc/backend/scripts/executor/comparer.go @@ -1,9 +1,10 @@ -package backend +package executor import ( "bytes" "crypto/md5" //nolint:gosec "encoding/base64" + "errors" "strings" "time" @@ -36,7 +37,7 @@ func newScriptResult(result []byte, duration time.Duration, err error) *scriptRe type scriptResultComparison struct { log zerolog.Logger metrics module.BackendScriptsMetrics - request *scriptExecutionRequest + 
request *Request shouldLogScript func(time.Time, [md5.Size]byte) bool } @@ -44,7 +45,7 @@ func newScriptResultComparison( log zerolog.Logger, metrics module.BackendScriptsMetrics, shouldLogScript func(time.Time, [md5.Size]byte) bool, - request *scriptExecutionRequest, + request *Request, ) *scriptResultComparison { return &scriptResultComparison{ log: log, @@ -130,7 +131,7 @@ func isOutOfRangeError(err error) bool { } func compareErrors(execErr, localErr error) bool { - if execErr == localErr { + if errors.Is(execErr, localErr) { return true } diff --git a/engine/access/rpc/backend/script_comparer_test.go b/engine/access/rpc/backend/scripts/executor/comparer_test.go similarity index 98% rename from engine/access/rpc/backend/script_comparer_test.go rename to engine/access/rpc/backend/scripts/executor/comparer_test.go index d675abcb638..56c756d795e 100644 --- a/engine/access/rpc/backend/script_comparer_test.go +++ b/engine/access/rpc/backend/scripts/executor/comparer_test.go @@ -1,4 +1,4 @@ -package backend +package executor import ( "fmt" @@ -79,7 +79,7 @@ func TestCompare(t *testing.T) { }, } - request := newScriptExecutionRequest(unittest.IdentifierFixture(), 1, []byte("script"), [][]byte{}) + request := NewScriptExecutionRequest(unittest.IdentifierFixture(), 1, []byte("script"), [][]byte{}) shouldLogScript := func(time.Time, [16]byte) bool { return true } comparer := newScriptResultComparison(logger, m, shouldLogScript, request) diff --git a/engine/access/rpc/backend/scripts/executor/execution_node.go b/engine/access/rpc/backend/scripts/executor/execution_node.go new file mode 100644 index 00000000000..25cf3778b77 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/execution_node.go @@ -0,0 +1,128 @@ +package executor + +import ( + "context" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" +) + +type ENScriptExecutor struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics //TODO: move this metrics to scriptCache struct? 
+ + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider + nodeCommunicator node_communicator.Communicator + connFactory connection.ConnectionFactory + + scriptCache *LoggedScriptCache +} + +var _ ScriptExecutor = (*ENScriptExecutor)(nil) + +func NewENScriptExecutor( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider, + nodeCommunicator node_communicator.Communicator, + connFactory connection.ConnectionFactory, + scriptCache *LoggedScriptCache, +) *ENScriptExecutor { + return &ENScriptExecutor{ + log: zerolog.New(log).With().Str("script_executor", "execution_node").Logger(), + metrics: metrics, + nodeProvider: nodeProvider, + nodeCommunicator: nodeCommunicator, + connFactory: connFactory, + scriptCache: scriptCache, + } +} + +func (e *ENScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) { + // find few execution nodes which have executed the block earlier and provided an execution receipt for it + executors, err := e.nodeProvider.ExecutionNodesForBlockID(ctx, request.blockID) + if err != nil { + return nil, 0, status.Errorf( + codes.Internal, "failed to find script executors at blockId %v: %v", + request.blockID.String(), + err, + ) + } + + var result []byte + var executionTime time.Time + var execDuration time.Duration + errToReturn := e.nodeCommunicator.CallAvailableNode( + executors, + func(node *flow.IdentitySkeleton) error { + execStartTime := time.Now() + + result, err = e.tryExecuteScriptOnExecutionNode(ctx, node.Address, request) + + executionTime = time.Now() + execDuration = executionTime.Sub(execStartTime) + + if err != nil { + return err + } + + e.scriptCache.LogExecutedScript(request.blockID, request.insecureScriptHash, executionTime, node.Address, request.script, execDuration) + e.metrics.ScriptExecuted(time.Since(execStartTime), len(request.script)) + + return nil + }, + func(node *flow.IdentitySkeleton, err error) bool { + if status.Code(err) == codes.InvalidArgument { + e.scriptCache.LogFailedScript(request.blockID, request.insecureScriptHash, executionTime, node.Address, request.script) + return true + } + return false + }, + ) + + if errToReturn != nil { + if status.Code(errToReturn) != codes.InvalidArgument { + e.metrics.ScriptExecutionErrorOnExecutionNode() + e.log.Error().Err(errToReturn).Msg("script execution failed for execution node internal reasons") + } + return nil, execDuration, rpc.ConvertError(errToReturn, "failed to execute script on execution nodes", codes.Internal) + } + + return result, execDuration, nil +} + +// tryExecuteScriptOnExecutionNode attempts to execute the script on the given execution node. 
+func (e *ENScriptExecutor) tryExecuteScriptOnExecutionNode( + ctx context.Context, + executorAddress string, + r *Request, +) ([]byte, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(executorAddress) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create client for execution node %s: %v", + executorAddress, err) + } + defer closer.Close() + + execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &execproto.ExecuteScriptAtBlockIDRequest{ + BlockId: r.blockID[:], + Script: r.script, + Arguments: r.arguments, + }) + if err != nil { + return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", executorAddress, err) + } + + return execResp.GetValue(), nil +} diff --git a/engine/access/rpc/backend/scripts/executor/executor.go b/engine/access/rpc/backend/scripts/executor/executor.go new file mode 100644 index 00000000000..807db700416 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/executor.go @@ -0,0 +1,42 @@ +package executor + +import ( + "context" + "crypto/md5" //nolint:gosec + "time" + + "github.com/onflow/flow-go/model/flow" +) + +type ScriptExecutor interface { + Execute(ctx context.Context, scriptRequest *Request) ([]byte, time.Duration, error) +} + +// Request encapsulates the data needed to execute a script to make it easier +// to pass around between the various methods involved in script execution +type Request struct { + blockID flow.Identifier + height uint64 + script []byte + arguments [][]byte + insecureScriptHash [md5.Size]byte +} + +func NewScriptExecutionRequest( + blockID flow.Identifier, + height uint64, + script []byte, + arguments [][]byte, +) *Request { + return &Request{ + blockID: blockID, + height: height, + script: script, + arguments: arguments, + + // encode to MD5 as low compute/memory lookup key + // CAUTION: cryptographically insecure md5 is used here, but only to de-duplicate logs. + // *DO NOT* use this hash for any protocol-related or cryptographic functions. + insecureScriptHash: md5.Sum(script), //nolint:gosec + } +} diff --git a/engine/access/rpc/backend/scripts/executor/failover.go b/engine/access/rpc/backend/scripts/executor/failover.go new file mode 100644 index 00000000000..3d69ca5a217 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/failover.go @@ -0,0 +1,38 @@ +package executor + +import ( + "context" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type FailoverScriptExecutor struct { + localExecutor ScriptExecutor + executionNodeExecutor ScriptExecutor +} + +var _ ScriptExecutor = (*FailoverScriptExecutor)(nil) + +func NewFailoverScriptExecutor(localExecutor ScriptExecutor, execNodeExecutor ScriptExecutor) *FailoverScriptExecutor { + return &FailoverScriptExecutor{ + localExecutor: localExecutor, + executionNodeExecutor: execNodeExecutor, + } +} + +func (f *FailoverScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) { + localResult, localDuration, localErr := f.localExecutor.Execute(ctx, request) + + isInvalidArgument := status.Code(localErr) == codes.InvalidArgument + isCanceled := status.Code(localErr) == codes.Canceled + if localErr == nil || isInvalidArgument || isCanceled { + return localResult, localDuration, localErr + } + + // Note: scripts that timeout are retried on the execution nodes since ANs may have performance + // issues for some scripts. 
+ execResult, execDuration, execErr := f.executionNodeExecutor.Execute(ctx, request) + return execResult, execDuration, execErr +} diff --git a/engine/access/rpc/backend/scripts/executor/local.go b/engine/access/rpc/backend/scripts/executor/local.go new file mode 100644 index 00000000000..a875fc57264 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/local.go @@ -0,0 +1,113 @@ +package executor + +import ( + "context" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/common/rpc" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/utils/logging" +) + +type LocalScriptExecutor struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics + + scriptExecutor execution.ScriptExecutor + scriptCache *LoggedScriptCache +} + +var _ ScriptExecutor = (*LocalScriptExecutor)(nil) + +func NewLocalScriptExecutor( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + executor execution.ScriptExecutor, + scriptCache *LoggedScriptCache, +) *LocalScriptExecutor { + return &LocalScriptExecutor{ + log: zerolog.New(log).With().Str("script_executor", "local").Logger(), + metrics: metrics, + scriptCache: scriptCache, + scriptExecutor: executor, + } +} + +func (l *LocalScriptExecutor) Execute(ctx context.Context, r *Request) ([]byte, time.Duration, error) { + execStartTime := time.Now() + + result, err := l.scriptExecutor.ExecuteAtBlockHeight(ctx, r.script, r.arguments, r.height) + + execEndTime := time.Now() + execDuration := execEndTime.Sub(execStartTime) + + log := l.log.With(). + Str("script_executor_addr", "localhost"). + Hex("block_id", logging.ID(r.blockID)). + Uint64("height", r.height). + Hex("script_hash", r.insecureScriptHash[:]). + Dur("execution_dur_ms", execDuration). + Logger() + + if err != nil { + convertedErr := convertScriptExecutionError(err, r.height) + + switch status.Code(convertedErr) { + case codes.InvalidArgument, codes.Canceled, codes.DeadlineExceeded: + l.scriptCache.LogFailedScript(r.blockID, r.insecureScriptHash, execEndTime, "localhost", r.script) + + default: + log.Debug().Err(err).Msg("script execution failed") + l.metrics.ScriptExecutionErrorLocal() //TODO: this should be called in above cases as well? 
+ } + + return nil, execDuration, convertedErr + } + + l.scriptCache.LogExecutedScript(r.blockID, r.insecureScriptHash, execEndTime, "localhost", r.script, execDuration) + l.metrics.ScriptExecuted(execDuration, len(r.script)) + + return result, execDuration, nil +} + +// convertScriptExecutionError converts the script execution error to a gRPC error +func convertScriptExecutionError(err error, height uint64) error { + if err == nil { + return nil + } + + var failure fvmerrors.CodedFailure + if fvmerrors.As(err, &failure) { + return rpc.ConvertError(err, "failed to execute script", codes.Internal) + } + + // general FVM/ledger errors + var coded fvmerrors.CodedError + if fvmerrors.As(err, &coded) { + switch coded.Code() { + case fvmerrors.ErrCodeScriptExecutionCancelledError: + return status.Errorf(codes.Canceled, "script execution canceled: %v", err) + + case fvmerrors.ErrCodeScriptExecutionTimedOutError: + return status.Errorf(codes.DeadlineExceeded, "script execution timed out: %v", err) + + case fvmerrors.ErrCodeComputationLimitExceededError: + return status.Errorf(codes.ResourceExhausted, "script execution computation limit exceeded: %v", err) + + case fvmerrors.ErrCodeMemoryLimitExceededError: + return status.Errorf(codes.ResourceExhausted, "script execution memory limit exceeded: %v", err) + + default: + // runtime errors + return status.Errorf(codes.InvalidArgument, "failed to execute script: %v", err) + } + } + + return rpc.ConvertIndexError(err, height, "failed to execute script") +} diff --git a/engine/access/rpc/backend/scripts/executor/logged_script_cache.go b/engine/access/rpc/backend/scripts/executor/logged_script_cache.go new file mode 100644 index 00000000000..965efcd8e85 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/logged_script_cache.go @@ -0,0 +1,76 @@ +package executor + +import ( + "crypto/md5" //nolint:gosec + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" +) + +// uniqueScriptLoggingTimeWindow is the duration for checking the uniqueness of scripts sent for execution +const uniqueScriptLoggingTimeWindow = 10 * time.Minute + +type LoggedScriptCache struct { + log zerolog.Logger + loggedScripts *lru.Cache[[md5.Size]byte, time.Time] +} + +func NewLoggedScriptCache(log zerolog.Logger, loggedScripts *lru.Cache[[md5.Size]byte, time.Time]) *LoggedScriptCache { + return &LoggedScriptCache{ + log: log, + loggedScripts: loggedScripts, + } +} + +func (s *LoggedScriptCache) LogExecutedScript( + blockID flow.Identifier, + scriptHash [md5.Size]byte, + executionTime time.Time, + address string, + script []byte, + dur time.Duration, +) { + if s.shouldLogScript(executionTime, scriptHash) { + s.log.Debug(). + Str("block_id", blockID.String()). + Str("script_executor_addr", address). + Str("script", string(script)). + Dur("execution_dur_ms", dur). + Msg("Successfully executed script") + + s.loggedScripts.Add(scriptHash, executionTime) + } +} + +func (s *LoggedScriptCache) LogFailedScript( + blockID flow.Identifier, + scriptHash [md5.Size]byte, + executionTime time.Time, + address string, + script []byte, +) { + logEvent := s.log.Debug(). + Str("block_id", blockID.String()). 
+ Str("script_executor_addr", address) + + if s.shouldLogScript(executionTime, scriptHash) { + logEvent.Str("script", string(script)) + } + + logEvent.Msg("failed to execute script") + s.loggedScripts.Add(scriptHash, executionTime) +} + +func (s *LoggedScriptCache) shouldLogScript(execTime time.Time, scriptHash [md5.Size]byte) bool { + if s.log.GetLevel() > zerolog.DebugLevel { + return false + } + timestamp, seen := s.loggedScripts.Get(scriptHash) + if seen { + return execTime.Sub(timestamp) >= uniqueScriptLoggingTimeWindow + } + return true +} diff --git a/engine/access/rpc/backend/scripts/scripts.go b/engine/access/rpc/backend/scripts/scripts.go new file mode 100644 index 00000000000..92776f50494 --- /dev/null +++ b/engine/access/rpc/backend/scripts/scripts.go @@ -0,0 +1,126 @@ +package scripts + +import ( + "context" + "crypto/md5" //nolint:gosec + "fmt" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/backend/scripts/executor" + "github.com/onflow/flow-go/engine/access/rpc/connection" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type Scripts struct { + headers storage.Headers + state protocol.State + executor executor.ScriptExecutor +} + +var _ access.ScriptsAPI = (*Scripts)(nil) + +func NewScriptsBackend( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + headers storage.Headers, + state protocol.State, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + scriptExecutor execution.ScriptExecutor, + scriptExecMode query_mode.IndexQueryMode, + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider, + loggedScripts *lru.Cache[[md5.Size]byte, time.Time], +) (*Scripts, error) { + var exec executor.ScriptExecutor + cache := executor.NewLoggedScriptCache(log, loggedScripts) + + switch scriptExecMode { + case query_mode.IndexQueryModeLocalOnly: + exec = executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + + case query_mode.IndexQueryModeExecutionNodesOnly: + exec = executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + + case query_mode.IndexQueryModeFailover: + local := executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + execNode := executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + exec = executor.NewFailoverScriptExecutor(local, execNode) + + case query_mode.IndexQueryModeCompare: + local := executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + execNode := executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + exec = executor.NewComparingScriptExecutor(log, metrics, cache, local, execNode) + + default: + return nil, fmt.Errorf("invalid index mode: %s", scriptExecMode.String()) + } + + return &Scripts{ + headers: headers, + state: state, + executor: exec, + }, nil +} + +// ExecuteScriptAtLatestBlock executes provided script at the latest sealed block. 
+func (b *Scripts) ExecuteScriptAtLatestBlock( + ctx context.Context, + script []byte, + arguments [][]byte, +) ([]byte, error) { + latestHeader, err := b.state.Sealed().Head() + if err != nil { + // the latest sealed header MUST be available + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(latestHeader.ID(), latestHeader.Height, script, arguments)) + return res, err +} + +// ExecuteScriptAtBlockID executes provided script at the provided block ID. +func (b *Scripts) ExecuteScriptAtBlockID( + ctx context.Context, + blockID flow.Identifier, + script []byte, + arguments [][]byte, +) ([]byte, error) { + header, err := b.headers.ByBlockID(blockID) + if err != nil { + return nil, commonrpc.ConvertStorageError(err) + } + + res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(blockID, header.Height, script, arguments)) + return res, err +} + +// ExecuteScriptAtBlockHeight executes provided script at the provided block height. +func (b *Scripts) ExecuteScriptAtBlockHeight( + ctx context.Context, + blockHeight uint64, + script []byte, + arguments [][]byte, +) ([]byte, error) { + header, err := b.headers.ByHeight(blockHeight) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), blockHeight, err)) + } + + res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(header.ID(), blockHeight, script, arguments)) + return res, err +} diff --git a/engine/access/rpc/backend/backend_scripts_test.go b/engine/access/rpc/backend/scripts/scripts_test.go similarity index 79% rename from engine/access/rpc/backend/backend_scripts_test.go rename to engine/access/rpc/backend/scripts/scripts_test.go index 43abac07a62..6bdda80de8c 100644 --- a/engine/access/rpc/backend/backend_scripts_test.go +++ b/engine/access/rpc/backend/scripts/scripts_test.go @@ -1,4 +1,4 @@ -package backend +package scripts import ( "context" @@ -10,6 +10,7 @@ import ( lru "github.com/hashicorp/golang-lru/v2" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -17,10 +18,14 @@ import ( execproto "github.com/onflow/flow/protobuf/go/flow/execution" access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" fvmerrors "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" execmock "github.com/onflow/flow-go/module/execution/mock" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" @@ -93,26 +98,31 @@ func (s *BackendScriptsSuite) SetupTest() { s.failingScript = []byte("access(all) fun main() { panic(\"!!\") }") } -func (s *BackendScriptsSuite) defaultBackend() *backendScripts { - loggedScripts, err := lru.New[[md5.Size]byte, time.Time](DefaultLoggedScriptsCacheSize) +func (s *BackendScriptsSuite) defaultBackend(executor execution.ScriptExecutor, mode query_mode.IndexQueryMode) *Scripts { + loggedScripts, err := 
lru.New[[md5.Size]byte, time.Time](common.DefaultLoggedScriptsCacheSize) s.Require().NoError(err) - return &backendScripts{ - log: s.log, - metrics: metrics.NewNoopCollector(), - state: s.state, - headers: s.headers, - loggedScripts: loggedScripts, - connFactory: s.connectionFactory, - nodeCommunicator: NewNodeCommunicator(false), - execNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( + scripts, err := NewScriptsBackend( + s.log, + metrics.NewNoopCollector(), + s.headers, + s.state, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + executor, + mode, + commonrpc.NewExecutionNodeIdentitiesProvider( s.log, s.state, s.receipts, flow.IdentifierList{}, flow.IdentifierList{}, ), - } + loggedScripts, + ) + require.NoError(s.T(), err) + + return scripts } // setupExecutionNodes sets up the mocks required to test against an EN backend @@ -166,19 +176,18 @@ func (s *BackendScriptsSuite) TestExecuteScriptOnExecutionNode_HappyPath() { s.setupExecutionNodes(s.block) s.setupENSuccessResponse(s.block.ID()) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scripts := s.defaultBackend(execmock.NewScriptExecutor(s.T()), query_mode.IndexQueryModeExecutionNodesOnly) s.Run("GetAccount", func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, codes.OK) + s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK) }) s.Run("ExecuteScriptAtBlockID", func() { - s.testExecuteScriptAtBlockID(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK) }) s.Run("ExecuteScriptAtBlockHeight", func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK) }) } @@ -194,19 +203,18 @@ func (s *BackendScriptsSuite) TestExecuteScriptOnExecutionNode_Fails() { s.setupExecutionNodes(s.block) s.setupENFailingResponse(s.block.ID(), errToReturn) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeExecutionNodesOnly + scripts := s.defaultBackend(execmock.NewScriptExecutor(s.T()), query_mode.IndexQueryModeExecutionNodesOnly) s.Run("GetAccount", func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, statusCode) + s.testExecuteScriptAtLatestBlock(ctx, scripts, statusCode) }) s.Run("ExecuteScriptAtBlockID", func() { - s.testExecuteScriptAtBlockID(ctx, backend, statusCode) + s.testExecuteScriptAtBlockID(ctx, scripts, statusCode) }) s.Run("ExecuteScriptAtBlockHeight", func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, statusCode) + s.testExecuteScriptAtBlockHeight(ctx, scripts, statusCode) }) } @@ -219,20 +227,18 @@ func (s *BackendScriptsSuite) TestExecuteScriptFromStorage_HappyPath() { scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, s.script, s.arguments, s.block.Header.Height). 
Return(expectedResponse, nil) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly) s.Run("GetAccount - happy path", func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, codes.OK) + s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK) }) s.Run("GetAccountAtLatestBlock - happy path", func() { - s.testExecuteScriptAtBlockID(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK) }) s.Run("GetAccountAtBlockHeight - happy path", func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK) }) } @@ -242,10 +248,7 @@ func (s *BackendScriptsSuite) TestExecuteScriptFromStorage_Fails() { ctx := context.Background() scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly) testCases := []struct { err error @@ -278,15 +281,15 @@ func (s *BackendScriptsSuite) TestExecuteScriptFromStorage_Fails() { Return(nil, tt.err).Times(3) s.Run(fmt.Sprintf("GetAccount - fails with %v", tt.err), func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, tt.statusCode) + s.testExecuteScriptAtLatestBlock(ctx, scripts, tt.statusCode) }) s.Run(fmt.Sprintf("GetAccountAtLatestBlock - fails with %v", tt.err), func() { - s.testExecuteScriptAtBlockID(ctx, backend, tt.statusCode) + s.testExecuteScriptAtBlockID(ctx, scripts, tt.statusCode) }) s.Run(fmt.Sprintf("GetAccountAtBlockHeight - fails with %v", tt.err), func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, tt.statusCode) + s.testExecuteScriptAtBlockHeight(ctx, scripts, tt.statusCode) }) } } @@ -309,10 +312,7 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_HappyPath() { s.setupENSuccessResponse(s.block.ID()) scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover) for _, errToReturn := range errors { // configure local script executor to fail @@ -320,15 +320,15 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_HappyPath() { Return(nil, errToReturn).Times(3) s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - recovers %v", errToReturn), func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, codes.OK) + s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK) }) s.Run(fmt.Sprintf("ExecuteScriptAtBlockID - recovers %v", errToReturn), func() { - s.testExecuteScriptAtBlockID(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK) }) s.Run(fmt.Sprintf("ExecuteScriptAtBlockHeight - recovers %v", errToReturn), func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, codes.OK) + s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK) }) } } @@ -340,10 +340,7 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_SkippedForCorrectCod // configure local script executor to fail scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover) testCases := []struct { err error @@ -365,15 
+362,15 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_SkippedForCorrectCod Times(3) s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - %s", tt.statusCode), func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, tt.statusCode) + s.testExecuteScriptAtLatestBlock(ctx, scripts, tt.statusCode) }) s.Run(fmt.Sprintf("ExecuteScriptAtBlockID - %s", tt.statusCode), func() { - s.testExecuteScriptAtBlockID(ctx, backend, tt.statusCode) + s.testExecuteScriptAtBlockID(ctx, scripts, tt.statusCode) }) s.Run(fmt.Sprintf("ExecuteScriptAtBlockHeight - %s", tt.statusCode), func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, tt.statusCode) + s.testExecuteScriptAtBlockHeight(ctx, scripts, tt.statusCode) }) } } @@ -396,20 +393,18 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_ReturnsENErrors() { scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, s.block.Header.Height). Return(nil, storage.ErrHeightNotIndexed) - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeFailover - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover) s.Run("ExecuteScriptAtLatestBlock", func() { - s.testExecuteScriptAtLatestBlock(ctx, backend, statusCode) + s.testExecuteScriptAtLatestBlock(ctx, scripts, statusCode) }) s.Run("ExecuteScriptAtBlockID", func() { - s.testExecuteScriptAtBlockID(ctx, backend, statusCode) + s.testExecuteScriptAtBlockID(ctx, scripts, statusCode) }) s.Run("ExecuteScriptAtBlockHeight", func() { - s.testExecuteScriptAtBlockHeight(ctx, backend, statusCode) + s.testExecuteScriptAtBlockHeight(ctx, scripts, statusCode) }) } @@ -417,10 +412,7 @@ func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_ReturnsENErrors() { // inconsistent func (s *BackendScriptsSuite) TestExecuteScriptAtLatestBlockFromStorage_InconsistentState() { scriptExecutor := execmock.NewScriptExecutor(s.T()) - - backend := s.defaultBackend() - backend.scriptExecMode = IndexQueryModeLocalOnly - backend.scriptExecutor = scriptExecutor + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly) s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - fails with %v", "inconsistent node's state"), func() { s.state.On("Sealed").Return(s.snapshot, nil) @@ -432,54 +424,54 @@ func (s *BackendScriptsSuite) TestExecuteScriptAtLatestBlockFromStorage_Inconsis signalerCtx := irrecoverable.WithSignalerContext(context.Background(), irrecoverable.NewMockSignalerContextExpectError(s.T(), context.Background(), signCtxErr)) - actual, err := backend.ExecuteScriptAtLatestBlock(signalerCtx, s.script, s.arguments) + actual, err := scripts.ExecuteScriptAtLatestBlock(signalerCtx, s.script, s.arguments) s.Require().Error(err) s.Require().Nil(actual) }) } -func (s *BackendScriptsSuite) testExecuteScriptAtLatestBlock(ctx context.Context, backend *backendScripts, statusCode codes.Code) { +func (s *BackendScriptsSuite) testExecuteScriptAtLatestBlock(ctx context.Context, scripts *Scripts, statusCode codes.Code) { s.state.On("Sealed").Return(s.snapshot, nil).Once() s.snapshot.On("Head").Return(s.block.Header, nil).Once() if statusCode == codes.OK { - actual, err := backend.ExecuteScriptAtLatestBlock(ctx, s.script, s.arguments) + actual, err := scripts.ExecuteScriptAtLatestBlock(ctx, s.script, s.arguments) s.Require().NoError(err) s.Require().Equal(expectedResponse, actual) } else { - actual, err := backend.ExecuteScriptAtLatestBlock(ctx, s.failingScript, s.arguments) + actual, err := 
scripts.ExecuteScriptAtLatestBlock(ctx, s.failingScript, s.arguments) s.Require().Error(err) s.Require().Equal(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) s.Require().Nil(actual) } } -func (s *BackendScriptsSuite) testExecuteScriptAtBlockID(ctx context.Context, backend *backendScripts, statusCode codes.Code) { +func (s *BackendScriptsSuite) testExecuteScriptAtBlockID(ctx context.Context, scripts *Scripts, statusCode codes.Code) { blockID := s.block.ID() s.headers.On("ByBlockID", blockID).Return(s.block.Header, nil).Once() if statusCode == codes.OK { - actual, err := backend.ExecuteScriptAtBlockID(ctx, blockID, s.script, s.arguments) + actual, err := scripts.ExecuteScriptAtBlockID(ctx, blockID, s.script, s.arguments) s.Require().NoError(err) s.Require().Equal(expectedResponse, actual) } else { - actual, err := backend.ExecuteScriptAtBlockID(ctx, blockID, s.failingScript, s.arguments) + actual, err := scripts.ExecuteScriptAtBlockID(ctx, blockID, s.failingScript, s.arguments) s.Require().Error(err) s.Require().Equal(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) s.Require().Nil(actual) } } -func (s *BackendScriptsSuite) testExecuteScriptAtBlockHeight(ctx context.Context, backend *backendScripts, statusCode codes.Code) { +func (s *BackendScriptsSuite) testExecuteScriptAtBlockHeight(ctx context.Context, scripts *Scripts, statusCode codes.Code) { height := s.block.Header.Height s.headers.On("ByHeight", height).Return(s.block.Header, nil).Once() if statusCode == codes.OK { - actual, err := backend.ExecuteScriptAtBlockHeight(ctx, height, s.script, s.arguments) + actual, err := scripts.ExecuteScriptAtBlockHeight(ctx, height, s.script, s.arguments) s.Require().NoError(err) s.Require().Equal(expectedResponse, actual) } else { - actual, err := backend.ExecuteScriptAtBlockHeight(ctx, height, s.failingScript, s.arguments) + actual, err := scripts.ExecuteScriptAtBlockHeight(ctx, height, s.failingScript, s.arguments) s.Require().Error(err) s.Require().Equalf(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) s.Require().Nil(actual) diff --git a/engine/access/rpc/backend/transactions/error_messages/mock/provider.go b/engine/access/rpc/backend/transactions/error_messages/mock/provider.go new file mode 100644 index 00000000000..bc3b8915985 --- /dev/null +++ b/engine/access/rpc/backend/transactions/error_messages/mock/provider.go @@ -0,0 +1,217 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + execution "github.com/onflow/flow/protobuf/go/flow/execution" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// Provider is an autogenerated mock type for the Provider type +type Provider struct { + mock.Mock +} + +// ErrorMessageByBlockIDFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageByBlockIDFromAnyEN(ctx context.Context, execNodes flow.GenericIdentityList[flow.IdentitySkeleton], req *execution.GetTransactionErrorMessagesByBlockIDRequest) ([]*execution.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByBlockIDFromAnyEN") + } + + var r0 []*execution.GetTransactionErrorMessagesResponse_Result + var r1 *flow.IdentitySkeleton + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessagesByBlockIDRequest) ([]*execution.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessagesByBlockIDRequest) []*execution.GetTransactionErrorMessagesResponse_Result); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*execution.GetTransactionErrorMessagesResponse_Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessagesByBlockIDRequest) *flow.IdentitySkeleton); ok { + r1 = rf(ctx, execNodes, req) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*flow.IdentitySkeleton) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessagesByBlockIDRequest) error); ok { + r2 = rf(ctx, execNodes, req) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ErrorMessageByIndex provides a mock function with given fields: ctx, blockID, height, index +func (_m *Provider) ErrorMessageByIndex(ctx context.Context, blockID flow.Identifier, height uint64, index uint32) (string, error) { + ret := _m.Called(ctx, blockID, height, index) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByIndex") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, uint32) (string, error)); ok { + return rf(ctx, blockID, height, index) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, uint32) string); ok { + r0 = rf(ctx, blockID, height, index) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64, uint32) error); ok { + r1 = rf(ctx, blockID, height, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageByIndexFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageByIndexFromAnyEN(ctx context.Context, execNodes flow.GenericIdentityList[flow.IdentitySkeleton], req *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for 
ErrorMessageByIndexFromAnyEN") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageByIndexRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageByIndexRequest) error); ok { + r1 = rf(ctx, execNodes, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageByTransactionID provides a mock function with given fields: ctx, blockID, height, transactionID +func (_m *Provider) ErrorMessageByTransactionID(ctx context.Context, blockID flow.Identifier, height uint64, transactionID flow.Identifier) (string, error) { + ret := _m.Called(ctx, blockID, height, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByTransactionID") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, flow.Identifier) (string, error)); ok { + return rf(ctx, blockID, height, transactionID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, flow.Identifier) string); ok { + r0 = rf(ctx, blockID, height, transactionID) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64, flow.Identifier) error); ok { + r1 = rf(ctx, blockID, height, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageFromAnyEN(ctx context.Context, execNodes flow.GenericIdentityList[flow.IdentitySkeleton], req *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageFromAnyEN") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.GenericIdentityList[flow.IdentitySkeleton], *execution.GetTransactionErrorMessageRequest) error); ok { + r1 = rf(ctx, execNodes, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessagesByBlockID provides a mock function with given fields: ctx, blockID, height +func (_m *Provider) ErrorMessagesByBlockID(ctx context.Context, blockID flow.Identifier, height uint64) 
(map[flow.Identifier]string, error) { + ret := _m.Called(ctx, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessagesByBlockID") + } + + var r0 map[flow.Identifier]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) (map[flow.Identifier]string, error)); ok { + return rf(ctx, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) map[flow.Identifier]string); ok { + r0 = rf(ctx, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[flow.Identifier]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *Provider { + mock := &Provider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/error_messages/provider.go b/engine/access/rpc/backend/transactions/error_messages/provider.go new file mode 100644 index 00000000000..20c96d7f5fa --- /dev/null +++ b/engine/access/rpc/backend/transactions/error_messages/provider.go @@ -0,0 +1,472 @@ +package error_messages + +import ( + "context" + "errors" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc/convert" + + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +const DefaultFailedErrorMessage = "failed" + +// Provider declares the lookup transaction error methods by different input parameters. +type Provider interface { + // ErrorMessageByTransactionID is a function type for getting transaction error message by block ID and transaction ID. + // Expected errors during normal operation: + // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. + // - status.Error - remote GRPC call to EN has failed. + ErrorMessageByTransactionID(ctx context.Context, blockID flow.Identifier, height uint64, transactionID flow.Identifier) (string, error) + + // ErrorMessageByIndex is a function type for getting transaction error message by index. + // Expected errors during normal operation: + // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. + // - status.Error - remote GRPC call to EN has failed. + ErrorMessageByIndex(ctx context.Context, blockID flow.Identifier, height uint64, index uint32) (string, error) + + // ErrorMessagesByBlockID is a function type for getting transaction error messages by block ID. + // Expected errors during normal operation: + // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. + // - status.Error - remote GRPC call to EN has failed. 
+ ErrorMessagesByBlockID(ctx context.Context, blockID flow.Identifier, height uint64) (map[flow.Identifier]string, error) + + // ErrorMessageFromAnyEN performs an RPC call using available nodes passed as argument. + // List of nodes must be non-empty, otherwise an error will be returned. + // Expected errors during normal operation: + // - status.Error - GRPC call failed, some of the possible codes are: + // - codes.NotFound - request cannot be served by EN because of absence of data. + // - codes.Unavailable - remote node is unavailable. + ErrorMessageFromAnyEN( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionErrorMessageRequest, + ) (*execproto.GetTransactionErrorMessageResponse, error) + + // ErrorMessageByIndexFromAnyEN performs an RPC call using available nodes passed as argument. + // List of nodes must be non-empty, otherwise an error will be returned. + // Expected errors during normal operation: + // - status.Error - GRPC call failed, some of the possible codes are: + // - codes.NotFound - request cannot be served by EN because of absence of data. + // - codes.Unavailable - remote node is unavailable. + ErrorMessageByIndexFromAnyEN( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionErrorMessageByIndexRequest, + ) (*execproto.GetTransactionErrorMessageResponse, error) + + // ErrorMessageByBlockIDFromAnyEN performs an RPC call using available nodes passed as argument. + // List of nodes must be non-empty, otherwise an error will be returned. + // Expected errors during normal operation: + // - status.Error - GRPC call failed, some of the possible codes are: + // - codes.NotFound - request cannot be served by EN because of absence of data. + // - codes.Unavailable - remote node is unavailable. + ErrorMessageByBlockIDFromAnyEN( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionErrorMessagesByBlockIDRequest, + ) ([]*execproto.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) +} + +type ProviderImpl struct { + log zerolog.Logger + + txResultErrorMessages storage.TransactionResultErrorMessages + txResultsIndex *index.TransactionResultsIndex + + connFactory connection.ConnectionFactory + nodeCommunicator node_communicator.Communicator + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider +} + +var _ Provider = (*ProviderImpl)(nil) + +func NewTxErrorMessageProvider( + log zerolog.Logger, + txResultErrorMessages storage.TransactionResultErrorMessages, + txResultsIndex *index.TransactionResultsIndex, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, +) *ProviderImpl { + return &ProviderImpl{ + log: log, + txResultErrorMessages: txResultErrorMessages, + txResultsIndex: txResultsIndex, + connFactory: connFactory, + nodeCommunicator: nodeCommunicator, + execNodeIdentitiesProvider: execNodeIdentitiesProvider, + } +} + +// ErrorMessageByTransactionID returns transaction error message for specified transaction. +// If transaction error messages are stored locally, they will be checked first in local storage. +// If error messages are not stored locally, an RPC call will be made to the EN to fetch message. +// +// Expected errors during normal operation: +// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. +// - status.Error - remote GRPC call to EN has failed. 
+func (e *ProviderImpl) ErrorMessageByTransactionID( + ctx context.Context, + blockID flow.Identifier, + height uint64, + transactionID flow.Identifier, +) (string, error) { + if e.txResultErrorMessages != nil { + res, err := e.txResultErrorMessages.ByBlockIDTransactionID(blockID, transactionID) + if err == nil { + return res.ErrorMessage, nil + } + } + + execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return "", status.Error(codes.NotFound, err.Error()) + } + return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) + } + req := &execproto.GetTransactionErrorMessageRequest{ + BlockId: convert.IdentifierToMessage(blockID), + TransactionId: convert.IdentifierToMessage(transactionID), + } + + resp, err := e.ErrorMessageFromAnyEN(ctx, execNodes, req) + if err != nil { + // If no execution nodes return a valid response, + // return a static message "failed". + txResult, err := e.txResultsIndex.ByBlockIDTransactionID(blockID, height, transactionID) + if err != nil { + return "", rpc.ConvertStorageError(err) + } + + if txResult.Failed { + return DefaultFailedErrorMessage, nil + } + + // in case tx result is not failed + return "", nil + } + + return resp.ErrorMessage, nil +} + +// ErrorMessageByIndex returns the transaction error message for a specified transaction using its index. +// If transaction error messages are stored locally, they will be checked first in local storage. +// If error messages are not stored locally, an RPC call will be made to the EN to fetch message. +// +// Expected errors during normal operation: +// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. +// - status.Error - remote GRPC call to EN has failed. +func (e *ProviderImpl) ErrorMessageByIndex( + ctx context.Context, + blockID flow.Identifier, + height uint64, + index uint32, +) (string, error) { + if e.txResultErrorMessages != nil { + res, err := e.txResultErrorMessages.ByBlockIDTransactionIndex(blockID, index) + if err == nil { + return res.ErrorMessage, nil + } + } + + execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return "", status.Error(codes.NotFound, err.Error()) + } + return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) + } + req := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: convert.IdentifierToMessage(blockID), + Index: index, + } + + resp, err := e.ErrorMessageByIndexFromAnyEN(ctx, execNodes, req) + if err != nil { + // If no execution nodes return a valid response, + // return a static message "failed" + txResult, err := e.txResultsIndex.ByBlockIDTransactionIndex(blockID, height, index) + if err != nil { + return "", rpc.ConvertStorageError(err) + } + + if txResult.Failed { + return DefaultFailedErrorMessage, nil + } + + // in case tx result is not failed + return "", nil + } + + return resp.ErrorMessage, nil +} + +// ErrorMessagesByBlockID returns all error messages for failed transactions by blockID. +// If transaction error messages are stored locally, they will be checked first in local storage. +// If error messages are not stored locally, an RPC call will be made to the EN to fetch messages. +// +// Expected errors during normal operation: +// - InsufficientExecutionReceipts - found insufficient receipts for the given block ID. 
+// - status.Error - remote GRPC call to EN has failed. +func (e *ProviderImpl) ErrorMessagesByBlockID( + ctx context.Context, + blockID flow.Identifier, + height uint64, +) (map[flow.Identifier]string, error) { + result := make(map[flow.Identifier]string) + + if e.txResultErrorMessages != nil { + res, err := e.txResultErrorMessages.ByBlockID(blockID) + if err == nil { + for _, value := range res { + result[value.TransactionID] = value.ErrorMessage + } + + return result, nil + } + } + + execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to select execution nodes", codes.Internal) + } + req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: convert.IdentifierToMessage(blockID), + } + + resp, _, err := e.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req) + if err != nil { + // If no execution nodes return a valid response, + // return a static message "failed" + txResults, err := e.txResultsIndex.ByBlockID(blockID, height) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + for _, txResult := range txResults { + if txResult.Failed { + result[txResult.TransactionID] = DefaultFailedErrorMessage + } + } + + return result, nil + } + + for _, value := range resp { + result[convert.MessageToIdentifier(value.TransactionId)] = value.ErrorMessage + } + + return result, nil +} + +// ErrorMessageFromAnyEN performs an RPC call using available nodes passed as argument. +// List of nodes must be non-empty, otherwise an error will be returned. +// Expected errors during normal operation: +// - status.Error - GRPC call failed, some of the possible codes are: +// - codes.NotFound - request cannot be served by EN because of absence of data. +// - codes.Unavailable - remote node is unavailable. +func (e *ProviderImpl) ErrorMessageFromAnyEN( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionErrorMessageRequest, +) (*execproto.GetTransactionErrorMessageResponse, error) { + // if we were passed 0 execution nodes add a specific error + if len(execNodes) == 0 { + return nil, errors.New("zero execution nodes") + } + + var resp *execproto.GetTransactionErrorMessageResponse + errToReturn := e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetTransactionErrorMessageFromEN(ctx, node, req) + if err == nil { + e.log.Debug(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Hex("transaction_id", req.GetTransactionId()). + Msg("Successfully got transaction error message from any node") + return nil + } + return err + }, + nil, + ) + + // log the errors + if errToReturn != nil { + e.log.Err(errToReturn).Msg("failed to get transaction error message from execution nodes") + return nil, errToReturn + } + + return resp, nil +} + +// ErrorMessageByIndexFromAnyEN performs an RPC call using available nodes passed as argument. +// List of nodes must be non-empty, otherwise an error will be returned. +// Expected errors during normal operation: +// - status.Error - GRPC call failed, some of the possible codes are: +// - codes.NotFound - request cannot be served by EN because of absence of data. +// - codes.Unavailable - remote node is unavailable. 
+func (e *ProviderImpl) ErrorMessageByIndexFromAnyEN(
+	ctx context.Context,
+	execNodes flow.IdentitySkeletonList,
+	req *execproto.GetTransactionErrorMessageByIndexRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	// if we were passed 0 execution nodes, return a specific error
+	if len(execNodes) == 0 {
+		return nil, errors.New("zero execution nodes")
+	}
+
+	var resp *execproto.GetTransactionErrorMessageResponse
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		execNodes,
+		func(node *flow.IdentitySkeleton) error {
+			var err error
+			resp, err = e.tryGetTransactionErrorMessageByIndexFromEN(ctx, node, req)
+			if err == nil {
+				e.log.Debug().
+					Str("execution_node", node.String()).
+					Hex("block_id", req.GetBlockId()).
+					Uint32("index", req.GetIndex()).
+					Msg("Successfully got transaction error message by index from any node")
+				return nil
+			}
+			return err
+		},
+		nil,
+	)
+	if errToReturn != nil {
+		e.log.Err(errToReturn).Msg("failed to get transaction error message by index from execution nodes")
+		return nil, errToReturn
+	}
+
+	return resp, nil
+}
+
+// ErrorMessageByBlockIDFromAnyEN performs an RPC call using the available execution nodes passed as an argument.
+// The list of nodes must be non-empty, otherwise an error is returned.
+// Expected errors during normal operation:
+// - status.Error - GRPC call failed, some of the possible codes are:
+// - codes.NotFound - request cannot be served by EN because of absence of data.
+// - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) ErrorMessageByBlockIDFromAnyEN(
+	ctx context.Context,
+	execNodes flow.IdentitySkeletonList,
+	req *execproto.GetTransactionErrorMessagesByBlockIDRequest,
+) ([]*execproto.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) {
+	// if we were passed 0 execution nodes, return a specific error
+	if len(execNodes) == 0 {
+		return nil, nil, errors.New("zero execution nodes")
+	}
+
+	var resp *execproto.GetTransactionErrorMessagesResponse
+	var execNode *flow.IdentitySkeleton
+
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		execNodes,
+		func(node *flow.IdentitySkeleton) error {
+			var err error
+			execNode = node
+			resp, err = e.tryGetTransactionErrorMessagesByBlockIDFromEN(ctx, node, req)
+			if err == nil {
+				e.log.Debug().
+					Str("execution_node", node.String()).
+					Hex("block_id", req.GetBlockId()).
+					Msg("Successfully got transaction error messages from any node")
+				return nil
+			}
+			return err
+		},
+		nil,
+	)
+
+	// log the errors
+	if errToReturn != nil {
+		e.log.Err(errToReturn).Msg("failed to get transaction error messages from execution nodes")
+		return nil, nil, errToReturn
+	}
+
+	return resp.GetResults(), execNode, nil
+}
+
+// tryGetTransactionErrorMessageFromEN performs a gRPC call to the specified execution node and returns the response.
+//
+// Expected errors during normal operation:
+// - status.Error - GRPC call failed, some of the possible codes are:
+// - codes.NotFound - request cannot be served by EN because of absence of data.
+// - codes.Unavailable - remote node is unavailable.
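+//
+// The execution API client is obtained from the connection factory and released when the call completes.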
+func (e *ProviderImpl) tryGetTransactionErrorMessageFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessageRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessage(ctx, req)
+}
+
+// tryGetTransactionErrorMessageByIndexFromEN performs a gRPC call to the specified execution node and returns the response.
+// Expected errors during normal operation:
+// - status.Error - GRPC call failed, some of the possible codes are:
+// - codes.NotFound - request cannot be served by EN because of absence of data.
+// - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) tryGetTransactionErrorMessageByIndexFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessageByIndexRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessageByIndex(ctx, req)
+}
+
+// tryGetTransactionErrorMessagesByBlockIDFromEN performs a gRPC call to the specified execution node and returns the response.
+// Expected errors during normal operation:
+// - status.Error - GRPC call failed, some of the possible codes are:
+// - codes.NotFound - request cannot be served by EN because of absence of data.
+// - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) tryGetTransactionErrorMessagesByBlockIDFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessagesByBlockIDRequest,
+) (*execproto.GetTransactionErrorMessagesResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessagesByBlockID(ctx, req)
+}
diff --git a/engine/access/rpc/backend/transactions/error_messages/provider_test.go b/engine/access/rpc/backend/transactions/error_messages/provider_test.go
new file mode 100644
index 00000000000..3357d2aada3
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/error_messages/provider_test.go
@@ -0,0 +1,966 @@
+package error_messages
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"os"
+	"testing"
+
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/index"
+	accessmock "github.com/onflow/flow-go/engine/access/mock"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock"
+	commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+	syncmock "github.com/onflow/flow-go/module/state_synchronization/mock"
+	protocolmock "github.com/onflow/flow-go/state/protocol/mock"
+	"github.com/onflow/flow-go/storage"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest/mocks"
+)
+
+const expectedErrorMsg =
"expected test error" + +type Suite struct { + suite.Suite + + log zerolog.Logger + state *protocolmock.State + snapshot *protocolmock.Snapshot + + block flow.Block + blockID flow.Identifier + fixedExecutionNodes flow.IdentityList + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList + + receipts *storagemock.ExecutionReceipts + lightTxResults *storagemock.LightTransactionResults + txResultErrorMessages *storagemock.TransactionResultErrorMessages + + executionAPIClient *accessmock.ExecutionAPIClient + + nodeCommunicator *node_communicator.NodeCommunicator + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider + connectionFactory *connectionmock.ConnectionFactory + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex +} + +func TestSuite(t *testing.T) { + suite.Run(t, new(Suite)) +} + +func (suite *Suite) SetupTest() { + //suite.log = unittest.Logger() + suite.log = zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout}).With().Timestamp().Logger() + suite.snapshot = protocolmock.NewSnapshot(suite.T()) + + header := unittest.BlockHeaderFixture() + params := protocolmock.NewParams(suite.T()) + params.On("FinalizedRoot").Return(header, nil).Maybe() + params.On("SporkID").Return(unittest.IdentifierFixture(), nil).Maybe() + params.On("SporkRootBlockHeight").Return(header.Height, nil).Maybe() + params.On("SealedRoot").Return(header, nil).Maybe() + + suite.state = protocolmock.NewState(suite.T()) + suite.state.On("Params").Return(params).Maybe() + + suite.receipts = storagemock.NewExecutionReceipts(suite.T()) + suite.lightTxResults = storagemock.NewLightTransactionResults(suite.T()) + suite.txResultErrorMessages = storagemock.NewTransactionResultErrorMessages(suite.T()) + suite.executionAPIClient = accessmock.NewExecutionAPIClient(suite.T()) + suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) + suite.nodeCommunicator = node_communicator.NewNodeCommunicator(false) + + suite.block = unittest.BlockFixture() + suite.blockID = suite.block.ID() + _, suite.fixedExecutionNodes = suite.setupReceipts(&suite.block) + suite.fixedExecutionNodeIDs = suite.fixedExecutionNodes.NodeIDs() + suite.preferredExecutionNodeIDs = nil + + suite.nodeProvider = commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + suite.preferredExecutionNodeIDs, + suite.fixedExecutionNodeIDs, + ) + + suite.reporter = syncmock.NewIndexReporter(suite.T()) + suite.indexReporter = index.NewReporter() + err := suite.indexReporter.Initialize(suite.reporter) + suite.Require().NoError(err) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.lightTxResults) +} + +func (suite *Suite) TestLookupByTxID_FromExecutionNode_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + // Setup mock receipts and execution node identities. + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + // the connection factory should be used to get the execution node client + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + // Mock the cache lookup for the transaction error message, returning "not found". + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + // Mock the execution node API call to fetch the error message. + exeEventReq := &execproto.GetTransactionErrorMessageRequest{ + BlockId: suite.blockID[:], + TransactionId: failedTxId[:], + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: failedTxId[:], + ErrorMessage: expectedErrorMsg, + } + + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + // Perform the lookup and assert that the error message is retrieved correctly. + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) +} + +func (suite *Suite) TestLookupByTxID_FromStorage_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + failedTxIndex := rand.Uint32() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + // Mock the cache lookup for the transaction error message, returning a stored result. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(&flow.TransactionResultErrorMessage{ + TransactionID: failedTxId, + ErrorMessage: expectedErrorMsg, + Index: failedTxIndex, + ExecutorID: unittest.IdentifierFixture(), + }, nil). + Once() + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) +} + +func (suite *Suite) TestLookupByTxID_ExecNodeError_UnknownTx() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction and tx error message is not found in the storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). + Once() + suite.lightTxResults. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxId, + ) + suite.Require().Error(err) + suite.Require().Equal(codes.NotFound, status.Code(err)) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByTxID_ExecNodeError_TxResultNotFailed() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + // Lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is not failed. + suite.lightTxResults. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: false, + ComputationUsed: 0, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByTxID_ExecNodeError_TxResultFailed() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is failed. + suite.lightTxResults. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). 
+ Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: true, + ComputationUsed: 0, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Equal(errMsg, DefaultFailedErrorMessage) +} + +func (suite *Suite) TestLookupByIndex_FromExecutionNode_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + failedTxIndex := rand.Uint32() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot. + On("Identities", mock.Anything). + Return(suite.fixedExecutionNodes, nil). + Once() + + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: suite.blockID[:], + Index: failedTxIndex, + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: failedTxId[:], + ErrorMessage: expectedErrorMsg, + } + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) +} + +// TestLookupTransactionErrorMessageByIndex_HappyPath verifies the lookup of a transaction error message +// by block ID and transaction index. +// It tests two cases: +// 1. Happy path where the error message is fetched from the EN if it is not found in the cache. +// 2. Happy path where the error message is served from the storage database if it exists. +func (suite *Suite) TestLookupTransactionErrorMessageByIndex_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + failedTxIndex := rand.Uint32() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + suite.Run("happy path from EN", func() { + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: suite.blockID[:], + Index: failedTxIndex, + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: failedTxId[:], + ErrorMessage: expectedErrorMsg, + } + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex(context.Background(), suite.blockID, suite.block.Header.Height, failedTxIndex) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) + }) + + suite.Run("happy path from storage db", func() { + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(&flow.TransactionResultErrorMessage{ + TransactionID: failedTxId, + ErrorMessage: expectedErrorMsg, + Index: failedTxIndex, + ExecutorID: unittest.IdentifierFixture(), + }, nil). + Once() + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) + }) +} + +func (suite *Suite) TestLookupByIndex_ExecutionNodeError_UnknownTx() { + failedTxIndex := rand.Uint32() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction and tx error message is not found in the storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + suite.lightTxResults. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxIndex, + ) + suite.Require().Error(err) + suite.Require().Equal(codes.NotFound, status.Code(err)) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByIndex_ExecutionNodeError_TxResultNotFailed() { + failedTxIndex := rand.Uint32() + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). 
+ Twice() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is not failed. + suite.lightTxResults. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: false, + ComputationUsed: 0, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByIndex_ExecutionNodeError_TxResultFailed() { + failedTxIndex := rand.Uint32() + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is failed. + suite.lightTxResults. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: true, + ComputationUsed: 0, + }, nil). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Header.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Equal(errMsg, DefaultFailedErrorMessage) +} + +func (suite *Suite) TestLookupByBlockID_FromExecutionNode_HappyPath() { + resultsByBlockID := make([]flow.LightTransactionResult, 0) + for i := 0; i < 5; i++ { + resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + // Mock the execution node API call to fetch the error messages. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: suite.blockID[:], + } + exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} + for _, result := range resultsByBlockID { + r := result + if r.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, r.TransactionID) + exeErrMessagesResp.Results = append(exeErrMessagesResp.Results, &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: r.TransactionID[:], + ErrorMessage: errMsg, + }) + } + } + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(exeErrMessagesResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMessages, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + ) + suite.Require().NoError(err) + suite.Require().Len(errMessages, len(exeErrMessagesResp.Results)) + for _, expectedResult := range exeErrMessagesResp.Results { + errMsg, ok := errMessages[convert.MessageToIdentifier(expectedResult.TransactionId)] + suite.Require().True(ok) + suite.Assert().Equal(expectedResult.ErrorMessage, errMsg) + } +} + +func (suite *Suite) TestLookupByBlockID_FromStorage_HappyPath() { + resultsByBlockID := make([]flow.LightTransactionResult, 0) + for i := 0; i < 5; i++ { + resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + var txErrorMessages []flow.TransactionResultErrorMessage + for i, result := range resultsByBlockID { + if result.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, result.TransactionID) + + txErrorMessages = append(txErrorMessages, + flow.TransactionResultErrorMessage{ + TransactionID: result.TransactionID, + ErrorMessage: errMsg, + Index: uint32(i), + ExecutorID: unittest.IdentifierFixture(), + }) + } + } + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). 
+ Return(txErrorMessages, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMessages, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + ) + suite.Require().NoError(err) + suite.Require().Len(errMessages, len(txErrorMessages)) + + for _, expected := range txErrorMessages { + errMsg, ok := errMessages[expected.TransactionID] + suite.Require().True(ok) + suite.Assert().Equal(expected.ErrorMessage, errMsg) + } +} + +func (suite *Suite) TestLookupByBlockID_ExecutionNodeError_UnknownBlock() { + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction and tx error messages is not found in the storage. + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + suite.lightTxResults. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + // Perform the lookup and expect a "NotFound" error with an empty error message. + errMsg, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + ) + suite.Require().Error(err) + suite.Require().Equal(codes.NotFound, status.Code(err)) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByBlockID_ExecutionNodeError_TxResultNotFailed() { + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction results exists and is not failed. + suite.lightTxResults. + On("ByBlockID", suite.blockID). 
+ Return([]flow.LightTransactionResult{ + { + TransactionID: unittest.IdentifierFixture(), + Failed: false, + ComputationUsed: 0, + }, + { + TransactionID: unittest.IdentifierFixture(), + Failed: false, + ComputationUsed: 0, + }, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + ) + suite.Require().NoError(err) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupTransactionErrorMessagesByBlockID_FailedToFetch() { + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Header.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Header.Height+10, nil).Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction error messages is not found in storage. + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + + failedResultsByBlockID := []flow.LightTransactionResult{ + { + TransactionID: unittest.IdentifierFixture(), + Failed: true, + ComputationUsed: 0, + }, + { + TransactionID: unittest.IdentifierFixture(), + Failed: true, + ComputationUsed: 0, + }, + } + + suite.lightTxResults. + On("ByBlockID", suite.blockID). + Return(failedResultsByBlockID, nil). + Once() + + expectedTxErrorMessages := make(map[flow.Identifier]string) + for _, result := range failedResultsByBlockID { + if result.Failed { + expectedTxErrorMessages[result.TransactionID] = DefaultFailedErrorMessage + } + } + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + // Perform the lookup and expect the failed error messages to be returned. + errMsg, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Header.Height, + ) + suite.Require().NoError(err) + suite.Require().Len(errMsg, len(expectedTxErrorMessages)) + + for txID, expectedMessage := range expectedTxErrorMessages { + actualMessage, ok := errMsg[txID] + suite.Require().True(ok) + suite.Assert().Equal(expectedMessage, actualMessage) + } +} + +func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) { + ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) + receipt1 := unittest.ReceiptForBlockFixture(block) + receipt1.ExecutorID = ids[0].NodeID + receipt2 := unittest.ReceiptForBlockFixture(block) + receipt2.ExecutorID = ids[1].NodeID + receipt1.ExecutionResult = receipt2.ExecutionResult + + receipts := flow.ExecutionReceiptList{receipt1, receipt2} + suite.receipts. + On("ByBlockID", block.ID()). + Return(receipts, nil). 
+ Maybe() + + return receipts, ids +} diff --git a/engine/access/rpc/backend/transactions/provider/execution_node.go b/engine/access/rpc/backend/transactions/provider/execution_node.go new file mode 100644 index 00000000000..79ef6ec8398 --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/execution_node.go @@ -0,0 +1,469 @@ +package provider + +import ( + "context" + "errors" + + "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type ENTransactionProvider struct { + log zerolog.Logger + state protocol.State + + collections storage.Collections + + connFactory connection.ConnectionFactory + nodeCommunicator node_communicator.Communicator + nodeProvider *rpc.ExecutionNodeIdentitiesProvider + + txStatusDeriver *txstatus.TxStatusDeriver + + systemTxID flow.Identifier + systemTx *flow.TransactionBody +} + +var _ TransactionProvider = (*ENTransactionProvider)(nil) + +func NewENTransactionProvider( + log zerolog.Logger, + state protocol.State, + collections storage.Collections, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, + txStatusDeriver *txstatus.TxStatusDeriver, + systemTxID flow.Identifier, + systemTx *flow.TransactionBody, +) *ENTransactionProvider { + + return &ENTransactionProvider{ + log: log.With().Str("transaction_provider", "execution_node").Logger(), + state: state, + collections: collections, + connFactory: connFactory, + nodeCommunicator: nodeCommunicator, + nodeProvider: execNodeIdentitiesProvider, + txStatusDeriver: txStatusDeriver, + systemTxID: systemTxID, + systemTx: systemTx, + } +} + +func (e *ENTransactionProvider) TransactionResult( + ctx context.Context, + block *flow.Header, + transactionID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + // create an execution API request for events at blockID and transactionID + req := &execproto.GetTransactionResultRequest{ + BlockId: blockID[:], + TransactionId: transactionID[:], + } + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + // if no execution receipt were found, return a NotFound GRPC error + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, err + } + + resp, err := e.getTransactionResultFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, err + } + + // tx body is irrelevant to status if it's in an executed block + txStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + if !errors.Is(err, 
state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert events to message", codes.Internal) + } + + return &accessmodel.TransactionResult{ + TransactionID: transactionID, + Status: txStatus, + StatusCode: uint(resp.GetStatusCode()), + Events: events, + ErrorMessage: resp.GetErrorMessage(), + BlockID: blockID, + BlockHeight: block.Height, + }, nil +} + +func (e *ENTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + // create request and forward to EN + req := &execproto.GetTransactionByIndexRequest{ + BlockId: blockID[:], + Index: index, + } + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + resp, err := e.getTransactionResultByIndexFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) + } + + // tx body is irrelevant to status if it's in an executed block + txStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), encodingVersion) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert events in blockID %x: %v", blockID, err) + } + + // convert to response, cache and return + return &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: uint(resp.GetStatusCode()), + Events: events, + ErrorMessage: resp.GetErrorMessage(), + BlockID: blockID, + BlockHeight: block.Header.Height, + }, nil +} + +func (e *ENTransactionProvider) TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + blockID := block.ID() + req := &execproto.GetTransactionsByBlockIDRequest{ + BlockId: blockID[:], + } + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + resp, err := e.getTransactionResultsByBlockIDFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) + } + + results := make([]*accessmodel.TransactionResult, 0, len(resp.TransactionResults)) + i := 0 + errInsufficientResults := status.Errorf( + codes.Internal, + "number of transaction results returned by execution node is less than the number of transactions in the block", + ) + + for _, guarantee := range 
block.Payload.Guarantees { + collection, err := e.collections.LightByID(guarantee.CollectionID) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + for _, txID := range collection.Transactions { + // bounds check. this means the EN returned fewer transaction results than the transactions in the block + if i >= len(resp.TransactionResults) { + return nil, errInsufficientResults + } + txResult := resp.TransactionResults[i] + + // tx body is irrelevant to status if it's in an executed block + txStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + events, err := convert.MessagesToEventsWithEncodingConversion(txResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) + if err != nil { + return nil, status.Errorf(codes.Internal, + "failed to convert events to message in txID %x: %v", txID, err) + } + + results = append(results, &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: uint(txResult.GetStatusCode()), + Events: events, + ErrorMessage: txResult.GetErrorMessage(), + BlockID: blockID, + TransactionID: txID, + CollectionID: guarantee.CollectionID, + BlockHeight: block.Header.Height, + }) + + i++ + } + } + + // after iterating through all transactions in each collection, i equals the total number of + // user transactions in the block + txCount := i + sporkRootBlockHeight := e.state.Params().SporkRootBlockHeight() + + // root block has no system transaction result + if block.Header.Height > sporkRootBlockHeight { + // system chunk transaction + + // resp.TransactionResultsByBlockID includes the system tx result, so there should be exactly one + // more result than txCount + if txCount != len(resp.TransactionResults)-1 { + if txCount >= len(resp.TransactionResults) { + return nil, errInsufficientResults + } + // otherwise there are extra results + // TODO(bft): slashable offense + return nil, status.Errorf(codes.Internal, "number of transaction results returned by execution node is more than the number of transactions in the block") + } + + systemTxResult := resp.TransactionResults[len(resp.TransactionResults)-1] + systemTxStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := convert.MessagesToEventsWithEncodingConversion(systemTxResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert events from system tx result", codes.Internal) + } + + results = append(results, &accessmodel.TransactionResult{ + Status: systemTxStatus, + StatusCode: uint(systemTxResult.GetStatusCode()), + Events: events, + ErrorMessage: systemTxResult.GetErrorMessage(), + BlockID: blockID, + TransactionID: e.systemTxID, + BlockHeight: block.Header.Height, + }) + } + return results, nil +} + +func (e *ENTransactionProvider) getTransactionResultFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionResultRequest, +) (*execproto.GetTransactionResultResponse, error) { + var errToReturn error + + defer func() { + if errToReturn != nil { + e.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") + } 
+ }() + + var resp *execproto.GetTransactionResultResponse + errToReturn = e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetTransactionResult(ctx, node, req) + if err == nil { + e.log.Debug(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Hex("transaction_id", req.GetTransactionId()). + Msg("Successfully got transaction results from any node") + return nil + } + return err + }, + nil, + ) + + return resp, errToReturn +} + +func (e *ENTransactionProvider) getTransactionResultsByBlockIDFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionsByBlockIDRequest, +) (*execproto.GetTransactionResultsResponse, error) { + var errToReturn error + + defer func() { + // log the errors + if errToReturn != nil { + e.log.Err(errToReturn).Msg("failed to get transaction results from execution nodes") + } + }() + + // if we were passed 0 execution nodes add a specific error + if len(execNodes) == 0 { + return nil, errors.New("zero execution nodes") + } + + var resp *execproto.GetTransactionResultsResponse + errToReturn = e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetTransactionResultsByBlockID(ctx, node, req) + if err == nil { + e.log.Debug(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Msg("Successfully got transaction results from any node") + return nil + } + return err + }, + nil, + ) + + return resp, errToReturn +} + +func (e *ENTransactionProvider) getTransactionResultByIndexFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionByIndexRequest, +) (*execproto.GetTransactionResultResponse, error) { + var errToReturn error + defer func() { + if errToReturn != nil { + e.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") + } + }() + + if len(execNodes) == 0 { + return nil, errors.New("zero execution nodes provided") + } + + var resp *execproto.GetTransactionResultResponse + errToReturn = e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetTransactionResultByIndex(ctx, node, req) + if err == nil { + e.log.Debug(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Uint32("index", req.GetIndex()). 
+ Msg("Successfully got transaction results from any node") + return nil + } + return err + }, + nil, + ) + + return resp, errToReturn +} + +func (e *ENTransactionProvider) tryGetTransactionResult( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionResultRequest, +) (*execproto.GetTransactionResultResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResult(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ENTransactionProvider) tryGetTransactionResultsByBlockID( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionsByBlockIDRequest, +) (*execproto.GetTransactionResultsResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ENTransactionProvider) tryGetTransactionResultByIndex( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionByIndexRequest, +) (*execproto.GetTransactionResultResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResultByIndex(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/engine/access/rpc/backend/transactions/provider/failover.go b/engine/access/rpc/backend/transactions/provider/failover.go new file mode 100644 index 00000000000..963a6f40d00 --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/failover.go @@ -0,0 +1,68 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +type FailoverTransactionProvider struct { + localProvider TransactionProvider + execNodeProvider TransactionProvider +} + +var _ TransactionProvider = (*FailoverTransactionProvider)(nil) + +func NewFailoverTransactionProvider(local TransactionProvider, execNode TransactionProvider) *FailoverTransactionProvider { + return &FailoverTransactionProvider{ + localProvider: local, + execNodeProvider: execNode, + } +} + +func (f *FailoverTransactionProvider) TransactionResult( + ctx context.Context, + header *flow.Header, + txID flow.Identifier, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + localResult, localErr := f.localProvider.TransactionResult(ctx, header, txID, encodingVersion) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.TransactionResult(ctx, header, txID, encodingVersion) + return execNodeResult, execNodeErr +} + +func (f *FailoverTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + localResult, localErr := f.localProvider.TransactionResultByIndex(ctx, block, index, encodingVersion) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.TransactionResultByIndex(ctx, block, index, 
encodingVersion) + return execNodeResult, execNodeErr +} + +func (f *FailoverTransactionProvider) TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + encodingVersion entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + localResults, localErr := f.localProvider.TransactionResultsByBlockID(ctx, block, encodingVersion) + if localErr == nil { + return localResults, nil + } + + execNodeResults, execNodeErr := f.execNodeProvider.TransactionResultsByBlockID(ctx, block, encodingVersion) + return execNodeResults, execNodeErr +} diff --git a/engine/access/rpc/backend/transactions_local_data_provider.go b/engine/access/rpc/backend/transactions/provider/local.go similarity index 57% rename from engine/access/rpc/backend/transactions_local_data_provider.go rename to engine/access/rpc/backend/transactions/provider/local.go index 648f373d916..7cb8addd648 100644 --- a/engine/access/rpc/backend/transactions_local_data_provider.go +++ b/engine/access/rpc/backend/transactions/provider/local.go @@ -1,4 +1,4 @@ -package backend +package provider import ( "context" @@ -11,11 +11,12 @@ import ( "google.golang.org/grpc/codes" "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" @@ -25,40 +26,43 @@ import ( // ErrTransactionNotInBlock represents an error indicating that the transaction is not found in the block. var ErrTransactionNotInBlock = errors.New("transaction not in block") -// TransactionErrorMessage declares the lookup transaction error methods by different input parameters. -type TransactionErrorMessage interface { - // LookupErrorMessageByTransactionID is a function type for getting transaction error message by block ID and transaction ID. - // Expected errors during normal operation: - // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. - // - status.Error - remote GRPC call to EN has failed. - LookupErrorMessageByTransactionID(ctx context.Context, blockID flow.Identifier, height uint64, transactionID flow.Identifier) (string, error) - - // LookupErrorMessageByIndex is a function type for getting transaction error message by index. - // Expected errors during normal operation: - // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. - // - status.Error - remote GRPC call to EN has failed. - LookupErrorMessageByIndex(ctx context.Context, blockID flow.Identifier, height uint64, index uint32) (string, error) - - // LookupErrorMessagesByBlockID is a function type for getting transaction error messages by block ID. - // Expected errors during normal operation: - // - InsufficientExecutionReceipts - found insufficient receipts for given block ID. - // - status.Error - remote GRPC call to EN has failed. 
- LookupErrorMessagesByBlockID(ctx context.Context, blockID flow.Identifier, height uint64) (map[flow.Identifier]string, error) +// LocalTransactionProvider provides functionality for retrieving transaction results and error messages from local storages +type LocalTransactionProvider struct { + state protocol.State + collections storage.Collections + blocks storage.Blocks + eventsIndex *index.EventsIndex + txResultsIndex *index.TransactionResultsIndex + txErrorMessages error_messages.Provider + systemTxID flow.Identifier + txStatusDeriver *txstatus.TxStatusDeriver } -// TransactionsLocalDataProvider provides functionality for retrieving transaction results and error messages from local storages -type TransactionsLocalDataProvider struct { - state protocol.State - collections storage.Collections - blocks storage.Blocks - eventsIndex *index.EventsIndex - txResultsIndex *index.TransactionResultsIndex - txErrorMessages TransactionErrorMessage - systemTxID flow.Identifier - lastFullBlockHeight *counters.PersistentStrictMonotonicCounter +var _ TransactionProvider = (*LocalTransactionProvider)(nil) + +func NewLocalTransactionProvider( + state protocol.State, + collections storage.Collections, + blocks storage.Blocks, + eventsIndex *index.EventsIndex, + txResultsIndex *index.TransactionResultsIndex, + txErrorMessages error_messages.Provider, + systemTxID flow.Identifier, + txStatusDeriver *txstatus.TxStatusDeriver, +) *LocalTransactionProvider { + return &LocalTransactionProvider{ + state: state, + collections: collections, + blocks: blocks, + eventsIndex: eventsIndex, + txResultsIndex: txResultsIndex, + txErrorMessages: txErrorMessages, + systemTxID: systemTxID, + txStatusDeriver: txStatusDeriver, + } } -// GetTransactionResultFromStorage retrieves a transaction result from storage by block ID and transaction ID. +// TransactionResult retrieves a transaction result from storage by block ID and transaction ID. // Expected errors during normal operation: // - codes.NotFound when result cannot be provided by storage due to the absence of data. // - codes.Internal if event payload conversion failed. @@ -67,11 +71,11 @@ type TransactionsLocalDataProvider struct { // // All other errors are considered as state corruption (fatal) or internal errors in the transaction error message // getter or when deriving transaction status. 
-func (t *TransactionsLocalDataProvider) GetTransactionResultFromStorage( +func (t *LocalTransactionProvider) TransactionResult( ctx context.Context, block *flow.Header, transactionID flow.Identifier, - requiredEventEncodingVersion entities.EventEncodingVersion, + encodingVersion entities.EventEncodingVersion, ) (*accessmodel.TransactionResult, error) { blockID := block.ID() txResult, err := t.txResultsIndex.ByBlockIDTransactionID(blockID, block.Height, transactionID) @@ -82,19 +86,24 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultFromStorage( var txErrorMessage string var txStatusCode uint = 0 if txResult.Failed { - txErrorMessage, err = t.txErrorMessages.LookupErrorMessageByTransactionID(ctx, blockID, block.Height, transactionID) + txErrorMessage, err = t.txErrorMessages.ErrorMessageByTransactionID(ctx, blockID, block.Height, transactionID) if err != nil { return nil, err } if len(txErrorMessage) == 0 { - return nil, status.Errorf(codes.Internal, "transaction failed but error message is empty for tx ID: %s block ID: %s", txResult.TransactionID, blockID) + return nil, status.Errorf( + codes.Internal, + "transaction failed but error message is empty for tx ID: %s block ID: %s", + txResult.TransactionID, + blockID, + ) } txStatusCode = 1 // statusCode of 1 indicates an error and 0 indicates no error, the same as on EN } - txStatus, err := t.DeriveTransactionStatus(block.Height, true) + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Height, true) if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { irrecoverable.Throw(ctx, err) @@ -108,7 +117,7 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultFromStorage( } // events are encoded in CCF format in storage. convert to JSON-CDC if requested - if requiredEventEncodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { + if encodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { events, err = convert.CcfEventsToJsonEvents(events) if err != nil { return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) @@ -126,7 +135,7 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultFromStorage( }, nil } -// GetTransactionResultsByBlockIDFromStorage retrieves transaction results by block ID from storage +// TransactionResultByIndex retrieves a transaction result by index from storage. // Expected errors during normal operation: // - codes.NotFound if result cannot be provided by storage due to the absence of data. // - codes.Internal when event payload conversion failed. @@ -135,7 +144,81 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultFromStorage( // // All other errors are considered as state corruption (fatal) or internal errors in the transaction error message // getter or when deriving transaction status. 
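Note: the benign cases listed above surface to callers either as gRPC status codes or as the named sentinels, so a caller can, for example, branch on `codes.NotFound` and treat everything else as unexpected in its context. A minimal sketch of such a caller, using the package paths introduced in this diff; the `example` package and the `lookupResult` helper are illustrative only, not part of the change:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow/protobuf/go/flow/entities"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider"
	accessmodel "github.com/onflow/flow-go/model/access"
	"github.com/onflow/flow-go/model/flow"
)

// lookupResult treats codes.NotFound as benign ("not indexed yet") and
// propagates every other error as unexpected in this caller's context.
func lookupResult(
	ctx context.Context,
	p provider.TransactionProvider,
	header *flow.Header,
	txID flow.Identifier,
) (*accessmodel.TransactionResult, error) {
	result, err := p.TransactionResult(ctx, header, txID, entities.EventEncodingVersion_CCF_V0)
	if status.Code(err) == codes.NotFound {
		// benign here: the local indexes simply do not cover this height yet
		return nil, nil
	}
	if err != nil {
		// anything else is unexpected for this caller
		return nil, fmt.Errorf("failed to look up transaction result: %w", err)
	}
	return result, nil
}
```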
-func (t *TransactionsLocalDataProvider) GetTransactionResultsByBlockIDFromStorage( +func (t *LocalTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + txResult, err := t.txResultsIndex.ByBlockIDTransactionIndex(blockID, block.Header.Height, index) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Header.Height, "failed to get transaction result") + } + + var txErrorMessage string + var txStatusCode uint = 0 + if txResult.Failed { + txErrorMessage, err = t.txErrorMessages.ErrorMessageByIndex(ctx, blockID, block.Header.Height, index) + if err != nil { + return nil, err + } + + if len(txErrorMessage) == 0 { + return nil, status.Errorf(codes.Internal, "transaction failed but error message is empty for tx ID: %s block ID: %s", txResult.TransactionID, blockID) + } + + txStatusCode = 1 // statusCode of 1 indicates an error and 0 indicates no error, the same as on EN + } + + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := t.eventsIndex.ByBlockIDTransactionIndex(blockID, block.Header.Height, index) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Header.Height, "failed to get events") + } + + // events are encoded in CCF format in storage. convert to JSON-CDC if requested + if requiredEventEncodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { + events, err = convert.CcfEventsToJsonEvents(events) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + } + + collectionID, err := t.lookupCollectionIDInBlock(block, txResult.TransactionID) + if err != nil { + return nil, err + } + + return &accessmodel.TransactionResult{ + TransactionID: txResult.TransactionID, + Status: txStatus, + StatusCode: txStatusCode, + Events: events, + ErrorMessage: txErrorMessage, + BlockID: blockID, + BlockHeight: block.Header.Height, + CollectionID: collectionID, + }, nil +} + +// TransactionResultsByBlockID retrieves transaction results by block ID from storage +// Expected errors during normal operation: +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +// - codes.Internal when event payload conversion failed. +// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// +// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message +// getter or when deriving transaction status. 
+func (t *LocalTransactionProvider) TransactionResultsByBlockID( ctx context.Context, block *flow.Block, requiredEventEncodingVersion entities.EventEncodingVersion, @@ -146,7 +229,7 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultsByBlockIDFromStorag return nil, rpc.ConvertIndexError(err, block.Header.Height, "failed to get transaction result") } - txErrors, err := t.txErrorMessages.LookupErrorMessagesByBlockID(ctx, blockID, block.Header.Height) + txErrors, err := t.txErrorMessages.ErrorMessagesByBlockID(ctx, blockID, block.Header.Height) if err != nil { return nil, err } @@ -178,7 +261,7 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultsByBlockIDFromStorag txStatusCode = 1 } - txStatus, err := t.DeriveTransactionStatus(block.Header.Height, true) + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, true) if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { irrecoverable.Throw(ctx, err) @@ -219,159 +302,9 @@ func (t *TransactionsLocalDataProvider) GetTransactionResultsByBlockIDFromStorag return results, nil } -// GetTransactionResultByIndexFromStorage retrieves a transaction result by index from storage. -// Expected errors during normal operation: -// - codes.NotFound if result cannot be provided by storage due to the absence of data. -// - codes.Internal when event payload conversion failed. -// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized -// - storage.ErrHeightNotIndexed when data is unavailable -// -// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message -// getter or when deriving transaction status. -func (t *TransactionsLocalDataProvider) GetTransactionResultByIndexFromStorage( - ctx context.Context, - block *flow.Block, - index uint32, - requiredEventEncodingVersion entities.EventEncodingVersion, -) (*accessmodel.TransactionResult, error) { - blockID := block.ID() - txResult, err := t.txResultsIndex.ByBlockIDTransactionIndex(blockID, block.Header.Height, index) - if err != nil { - return nil, rpc.ConvertIndexError(err, block.Header.Height, "failed to get transaction result") - } - - var txErrorMessage string - var txStatusCode uint = 0 - if txResult.Failed { - txErrorMessage, err = t.txErrorMessages.LookupErrorMessageByIndex(ctx, blockID, block.Header.Height, index) - if err != nil { - return nil, err - } - - if len(txErrorMessage) == 0 { - return nil, status.Errorf(codes.Internal, "transaction failed but error message is empty for tx ID: %s block ID: %s", txResult.TransactionID, blockID) - } - - txStatusCode = 1 // statusCode of 1 indicates an error and 0 indicates no error, the same as on EN - } - - txStatus, err := t.DeriveTransactionStatus(block.Header.Height, true) - if err != nil { - if !errors.Is(err, state.ErrUnknownSnapshotReference) { - irrecoverable.Throw(ctx, err) - } - return nil, rpc.ConvertStorageError(err) - } - - events, err := t.eventsIndex.ByBlockIDTransactionIndex(blockID, block.Header.Height, index) - if err != nil { - return nil, rpc.ConvertIndexError(err, block.Header.Height, "failed to get events") - } - - // events are encoded in CCF format in storage. 
convert to JSON-CDC if requested - if requiredEventEncodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { - events, err = convert.CcfEventsToJsonEvents(events) - if err != nil { - return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) - } - } - - collectionID, err := t.LookupCollectionIDInBlock(block, txResult.TransactionID) - if err != nil { - return nil, err - } - - return &accessmodel.TransactionResult{ - TransactionID: txResult.TransactionID, - Status: txStatus, - StatusCode: txStatusCode, - Events: events, - ErrorMessage: txErrorMessage, - BlockID: blockID, - BlockHeight: block.Header.Height, - CollectionID: collectionID, - }, nil -} - -// DeriveUnknownTransactionStatus is used to determine the status of transaction -// that are not in a block yet based on the provided reference block ID. -func (t *TransactionsLocalDataProvider) DeriveUnknownTransactionStatus(refBlockID flow.Identifier) (flow.TransactionStatus, error) { - referenceBlock, err := t.state.AtBlockID(refBlockID).Head() - if err != nil { - return flow.TransactionStatusUnknown, err - } - refHeight := referenceBlock.Height - // get the latest finalized block from the state - finalized, err := t.state.Final().Head() - if err != nil { - return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup final header: %w", err) - } - finalizedHeight := finalized.Height - - // if we haven't seen the expiry block for this transaction, it's not expired - if !isExpired(refHeight, finalizedHeight) { - return flow.TransactionStatusPending, nil - } - - // At this point, we have seen the expiry block for the transaction. - // This means that, if no collections prior to the expiry block contain - // the transaction, it can never be included and is expired. - // - // To ensure this, we need to have received all collections up to the - // expiry block to ensure the transaction did not appear in any. - - // the last full height is the height where we have received all - // collections for all blocks with a lower height - fullHeight := t.lastFullBlockHeight.Value() - - // if we have received collections for all blocks up to the expiry block, the transaction is expired - if isExpired(refHeight, fullHeight) { - return flow.TransactionStatusExpired, nil - } - - // tx found in transaction storage and collection storage but not in block storage - // However, this will not happen as of now since the ingestion engine doesn't subscribe - // for collections - return flow.TransactionStatusPending, nil -} - -// DeriveTransactionStatus is used to determine the status of a transaction based on the provided block height, and execution status. -// No errors expected during normal operations. 
-func (t *TransactionsLocalDataProvider) DeriveTransactionStatus(blockHeight uint64, executed bool) (flow.TransactionStatus, error) { - if !executed { - // If we've gotten here, but the block has not yet been executed, report it as only been finalized - return flow.TransactionStatusFinalized, nil - } - - // From this point on, we know for sure this transaction has at least been executed - - // get the latest sealed block from the State - sealed, err := t.state.Sealed().Head() - if err != nil { - return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) - } - - if blockHeight > sealed.Height { - // The block is not yet sealed, so we'll report it as only executed - return flow.TransactionStatusExecuted, nil - } - - // otherwise, this block has been executed, and sealed, so report as sealed - return flow.TransactionStatusSealed, nil -} - -// isExpired checks whether a transaction is expired given the height of the -// transaction's reference block and the height to compare against. -func isExpired(refHeight, compareToHeight uint64) bool { - if compareToHeight <= refHeight { - return false - } - return compareToHeight-refHeight > flow.DefaultTransactionExpiry -} - -// LookupCollectionIDInBlock returns the collection ID based on the transaction ID. The lookup is performed in block -// collections. -func (t *TransactionsLocalDataProvider) LookupCollectionIDInBlock( +// lookupCollectionIDInBlock returns the collection ID based on the transaction ID. +// The lookup is performed in block collections. +func (t *LocalTransactionProvider) lookupCollectionIDInBlock( block *flow.Block, txID flow.Identifier, ) (flow.Identifier, error) { @@ -392,7 +325,7 @@ func (t *TransactionsLocalDataProvider) LookupCollectionIDInBlock( // buildTxIDToCollectionIDMapping returns a map of transaction ID to collection ID based on the provided block. // No errors expected during normal operations. -func (t *TransactionsLocalDataProvider) buildTxIDToCollectionIDMapping(block *flow.Block) (map[flow.Identifier]flow.Identifier, error) { +func (t *LocalTransactionProvider) buildTxIDToCollectionIDMapping(block *flow.Block) (map[flow.Identifier]flow.Identifier, error) { txToCollectionID := make(map[flow.Identifier]flow.Identifier) for _, guarantee := range block.Payload.Guarantees { collection, err := t.collections.LightByID(guarantee.ID()) @@ -404,7 +337,6 @@ func (t *TransactionsLocalDataProvider) buildTxIDToCollectionIDMapping(block *fl txToCollectionID[txID] = guarantee.ID() } } - txToCollectionID[t.systemTxID] = flow.ZeroID return txToCollectionID, nil diff --git a/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go b/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go new file mode 100644 index 00000000000..6bb499ef239 --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go @@ -0,0 +1,124 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + access "github.com/onflow/flow-go/model/access" + + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// TransactionProvider is an autogenerated mock type for the TransactionProvider type +type TransactionProvider struct { + mock.Mock +} + +// TransactionResult provides a mock function with given fields: ctx, header, txID, encodingVersion +func (_m *TransactionProvider) TransactionResult(ctx context.Context, header *flow.Header, txID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*access.TransactionResult, error) { + ret := _m.Called(ctx, header, txID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResult") + } + + var r0 *access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) (*access.TransactionResult, error)); ok { + return rf(ctx, header, txID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) *access.TransactionResult); ok { + r0 = rf(ctx, header, txID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, header, txID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResultByIndex provides a mock function with given fields: ctx, block, index, encodingVersion +func (_m *TransactionProvider) TransactionResultByIndex(ctx context.Context, block *flow.Block, index uint32, encodingVersion entities.EventEncodingVersion) (*access.TransactionResult, error) { + ret := _m.Called(ctx, block, index, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResultByIndex") + } + + var r0 *access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) (*access.TransactionResult, error)); ok { + return rf(ctx, block, index, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) *access.TransactionResult); ok { + r0 = rf(ctx, block, index, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, block, index, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResultsByBlockID provides a mock function with given fields: ctx, block, encodingVersion +func (_m *TransactionProvider) TransactionResultsByBlockID(ctx context.Context, block *flow.Block, encodingVersion entities.EventEncodingVersion) ([]*access.TransactionResult, error) { + ret := _m.Called(ctx, block, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResultsByBlockID") + } + + var r0 []*access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, entities.EventEncodingVersion) ([]*access.TransactionResult, error)); ok { + return rf(ctx, block, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, 
entities.EventEncodingVersion) []*access.TransactionResult); ok { + r0 = rf(ctx, block, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, block, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTransactionProvider creates a new instance of TransactionProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionProvider { + mock := &TransactionProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/provider/provider.go b/engine/access/rpc/backend/transactions/provider/provider.go new file mode 100644 index 00000000000..e180a96f425 --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/provider.go @@ -0,0 +1,34 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +// TransactionProvider defines an interface for retrieving transaction results +// from various data sources, such as local storage and execution nodes. +type TransactionProvider interface { + TransactionResult( + ctx context.Context, + header *flow.Header, + txID flow.Identifier, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.TransactionResult, error) + + TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.TransactionResult, error) + + TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + encodingVersion entities.EventEncodingVersion, + ) ([]*accessmodel.TransactionResult, error) +} diff --git a/engine/access/rpc/backend/transactions/retrier/mock/retrier.go b/engine/access/rpc/backend/transactions/retrier/mock/retrier.go new file mode 100644 index 00000000000..be286cc99a9 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/mock/retrier.go @@ -0,0 +1,50 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Retrier is an autogenerated mock type for the Retrier type +type Retrier struct { + mock.Mock +} + +// RegisterTransaction provides a mock function with given fields: height, tx +func (_m *Retrier) RegisterTransaction(height uint64, tx *flow.TransactionBody) { + _m.Called(height, tx) +} + +// Retry provides a mock function with given fields: height +func (_m *Retrier) Retry(height uint64) error { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for Retry") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRetrier creates a new instance of Retrier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
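The mockery-generated mocks in these packages are driven through testify's expectation API. A small self-contained sketch of wiring the `TransactionProvider` mock into a test; the test itself is hypothetical, and the import aliases are chosen here only to avoid the `mock` name clash:

```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow/protobuf/go/flow/entities"

	providermock "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider/mock"
	accessmodel "github.com/onflow/flow-go/model/access"
	"github.com/onflow/flow-go/model/flow"
)

// TestProviderMock stubs TransactionResultsByBlockID and asserts the canned response.
func TestProviderMock(t *testing.T) {
	p := providermock.NewTransactionProvider(t) // cleanup asserts all expectations were met
	p.On("TransactionResultsByBlockID", mock.Anything, mock.Anything, mock.Anything).
		Return([]*accessmodel.TransactionResult{}, nil).
		Once()

	results, err := p.TransactionResultsByBlockID(
		context.Background(),
		&flow.Block{},
		entities.EventEncodingVersion_CCF_V0,
	)
	require.NoError(t, err)
	require.Empty(t, results)
}
```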
+func NewRetrier(t interface { + mock.TestingT + Cleanup(func()) +}) *Retrier { + mock := &Retrier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go b/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go new file mode 100644 index 00000000000..963ec911549 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TransactionSender is an autogenerated mock type for the TransactionSender type +type TransactionSender struct { + mock.Mock +} + +// SendRawTransaction provides a mock function with given fields: ctx, tx +func (_m *TransactionSender) SendRawTransaction(ctx context.Context, tx *flow.TransactionBody) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendRawTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTransactionSender creates a new instance of TransactionSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionSender(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionSender { + mock := &TransactionSender{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/retrier/noop.go b/engine/access/rpc/backend/transactions/retrier/noop.go new file mode 100644 index 00000000000..5f228e078c7 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/noop.go @@ -0,0 +1,19 @@ +package retrier + +import ( + "github.com/onflow/flow-go/model/flow" +) + +type NoopRetrier struct{} + +var _ Retrier = (*NoopRetrier)(nil) + +func NewNoopRetrier() *NoopRetrier { + return &NoopRetrier{} +} + +func (n *NoopRetrier) Retry(_ uint64) error { + return nil +} + +func (n *NoopRetrier) RegisterTransaction(_ uint64, _ *flow.TransactionBody) {} diff --git a/engine/access/rpc/backend/transactions/retrier/retrier.go b/engine/access/rpc/backend/transactions/retrier/retrier.go new file mode 100644 index 00000000000..1d1e944409f --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/retrier.go @@ -0,0 +1,181 @@ +package retrier + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" +) + +// RetryFrequency has to be less than TransactionExpiry or else this module does nothing +const RetryFrequency uint64 = 120 // Blocks + +type Transactions map[flow.Identifier]*flow.TransactionBody +type BlockHeightToTransactions map[uint64]Transactions + +type TransactionSender interface { + SendRawTransaction(ctx context.Context, tx *flow.TransactionBody) error +} + +type Retrier interface { + Retry(height uint64) error + RegisterTransaction(height uint64, tx *flow.TransactionBody) +} + +// RetrierImpl implements a simple retry mechanism for transaction submission. 
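To make the retry window concrete: `Retry` (below) only does work once the latest finalized height exceeds `flow.DefaultTransactionExpiry`, and then revisits earlier heights in `RetryFrequency` steps up to, but excluding, the latest height. A standalone sketch of that arithmetic; `retryHeights` is a hypothetical helper written against the constants used here, not part of the change:

```go
package example

import (
	"github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier"
	"github.com/onflow/flow-go/model/flow"
)

// retryHeights mirrors the loop in RetrierImpl.Retry: it lists the heights whose
// registered transactions would be retried when `latest` becomes finalized.
func retryHeights(latest uint64) []uint64 {
	if latest < flow.DefaultTransactionExpiry {
		// still inside the expiry window, so nothing can be expired and nothing is retried
		return nil
	}
	var heights []uint64
	for h := latest - flow.DefaultTransactionExpiry + retrier.RetryFrequency; h < latest; h += retrier.RetryFrequency {
		heights = append(heights, h)
	}
	return heights
}
```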
+type RetrierImpl struct { + log zerolog.Logger + + mu sync.RWMutex + pendingTransactions BlockHeightToTransactions + + blocks storage.Blocks + collections storage.Collections + + txSender TransactionSender + txStatusDeriver *status.TxStatusDeriver +} + +func NewRetrier( + log zerolog.Logger, + blocks storage.Blocks, + collections storage.Collections, + txSender TransactionSender, + txStatusDeriver *status.TxStatusDeriver, +) *RetrierImpl { + return &RetrierImpl{ + log: log, + pendingTransactions: BlockHeightToTransactions{}, + blocks: blocks, + collections: collections, + txSender: txSender, + txStatusDeriver: txStatusDeriver, + } +} + +// Retry attempts to resend transactions for a specified block height. +// It performs cleanup operations, including pruning old transactions, and retries sending +// transactions that are still pending. +// The method takes a block height as input. If the provided height is lower than +// flow.DefaultTransactionExpiry, no retries are performed, and the method returns nil. +// No errors expected during normal operations. +func (r *RetrierImpl) Retry(height uint64) error { + // No need to retry if height is lower than DefaultTransactionExpiry + if height < flow.DefaultTransactionExpiry { + return nil + } + + // naive cleanup for now, prune every 120 Blocks + if height%RetryFrequency == 0 { + r.prune(height) + } + + heightToRetry := height - flow.DefaultTransactionExpiry + RetryFrequency + + for heightToRetry < height { + err := r.retryTxsAtHeight(heightToRetry) + if err != nil { + return err + } + heightToRetry = heightToRetry + RetryFrequency + } + return nil +} + +// RegisterTransaction adds a transaction that could possibly be retried +func (r *RetrierImpl) RegisterTransaction(height uint64, tx *flow.TransactionBody) { + r.mu.Lock() + defer r.mu.Unlock() + if r.pendingTransactions[height] == nil { + r.pendingTransactions[height] = make(map[flow.Identifier]*flow.TransactionBody) + } + r.pendingTransactions[height][tx.ID()] = tx +} + +func (r *RetrierImpl) prune(height uint64) { + r.mu.Lock() + defer r.mu.Unlock() + // If height is less than the default, there will be no expired Transactions + if height < flow.DefaultTransactionExpiry { + return + } + for h := range r.pendingTransactions { + if h < height-flow.DefaultTransactionExpiry { + delete(r.pendingTransactions, h) + } + } +} + +// retryTxsAtHeight retries transactions at a specific block height. +// It looks up transactions at the specified height and retries sending +// raw transactions for those that are still pending. It also cleans up +// transactions that are no longer pending or have an unknown status. +// Error returns: +// - errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal). 
+func (r *RetrierImpl) retryTxsAtHeight(heightToRetry uint64) error { + r.mu.Lock() + defer r.mu.Unlock() + txsAtHeight := r.pendingTransactions[heightToRetry] + for txID, tx := range txsAtHeight { + // find the block for the transaction + block, err := r.lookupBlock(txID) + if err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return err + } + block = nil + } + + // find the transaction status + var status flow.TransactionStatus + if block == nil { + status, err = r.txStatusDeriver.DeriveUnknownTransactionStatus(tx.ReferenceBlockID) + } else { + status, err = r.txStatusDeriver.DeriveTransactionStatus(block.Header.Height, false) + } + + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + return err + } + continue + } + if status == flow.TransactionStatusPending { + err = r.txSender.SendRawTransaction(context.Background(), tx) + if err != nil { + r.log.Info(). + Str("retry", fmt.Sprintf("retryTxsAtHeight: %v", heightToRetry)). + Err(err). + Msg("failed to send raw transactions") + } + } else if status != flow.TransactionStatusUnknown { + // not pending or unknown, don't need to retry anymore + delete(txsAtHeight, txID) + } + } + return nil +} + +// Error returns: +// - `storage.ErrNotFound` - collection referenced by transaction or block by a collection has not been found. +// - all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal). +func (r *RetrierImpl) lookupBlock(txID flow.Identifier) (*flow.Block, error) { + collection, err := r.collections.LightByTransactionID(txID) + if err != nil { + return nil, err + } + + block, err := r.blocks.ByCollectionID(collection.ID()) + if err != nil { + return nil, err + } + + return block, nil +} diff --git a/engine/access/rpc/backend/transactions/status/deriver.go b/engine/access/rpc/backend/transactions/status/deriver.go new file mode 100644 index 00000000000..d32b9d26357 --- /dev/null +++ b/engine/access/rpc/backend/transactions/status/deriver.go @@ -0,0 +1,96 @@ +package status + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" +) + +type TxStatusDeriver struct { + state protocol.State + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter +} + +func NewTxStatusDeriver(state protocol.State, lastFullBlockHeight *counters.PersistentStrictMonotonicCounter) *TxStatusDeriver { + return &TxStatusDeriver{ + state: state, + lastFullBlockHeight: lastFullBlockHeight, + } +} + +// DeriveUnknownTransactionStatus is used to determine the status of transaction +// that are not in a block yet based on the provided reference block ID. +func (t *TxStatusDeriver) DeriveUnknownTransactionStatus(refBlockID flow.Identifier) (flow.TransactionStatus, error) { + referenceBlock, err := t.state.AtBlockID(refBlockID).Head() + if err != nil { + return flow.TransactionStatusUnknown, err + } + refHeight := referenceBlock.Height + // get the latest finalized block from the state + finalized, err := t.state.Final().Head() + if err != nil { + return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup final header: %w", err) + } + finalizedHeight := finalized.Height + + // if we haven't seen the expiry block for this transaction, it's not expired + if !isExpired(refHeight, finalizedHeight) { + return flow.TransactionStatusPending, nil + } + + // At this point, we have seen the expiry block for the transaction. 
+ // This means that, if no collections prior to the expiry block contain + // the transaction, it can never be included and is expired. + // + // To ensure this, we need to have received all collections up to the + // expiry block to ensure the transaction did not appear in any. + + // the last full height is the height where we have received all + // collections for all blocks with a lower height + fullHeight := t.lastFullBlockHeight.Value() + + // if we have received collections for all blocks up to the expiry block, the transaction is expired + if isExpired(refHeight, fullHeight) { + return flow.TransactionStatusExpired, nil + } + + // tx found in transaction storage and collection storage but not in block storage + // However, this will not happen as of now since the ingestion engine doesn't subscribe + // for collections + return flow.TransactionStatusPending, nil +} + +// DeriveTransactionStatus is used to determine the status of a transaction based on the provided block height, and execution status. +// No errors expected during normal operations. +func (t *TxStatusDeriver) DeriveTransactionStatus(blockHeight uint64, executed bool) (flow.TransactionStatus, error) { + if !executed { + // If we've gotten here, but the block has not yet been executed, report it as only been finalized + return flow.TransactionStatusFinalized, nil + } + + // From this point on, we know for sure this transaction has at least been executed + + // get the latest sealed block from the State + sealed, err := t.state.Sealed().Head() + if err != nil { + return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + } + + if blockHeight > sealed.Height { + // The block is not yet sealed, so we'll report it as only executed + return flow.TransactionStatusExecuted, nil + } + + // otherwise, this block has been executed, and sealed, so report as sealed + return flow.TransactionStatusSealed, nil +} + +// isExpired checks whether a transaction is expired given the height of the +// transaction's reference block and the height to compare against. 
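Callers such as the retrier combine the deriver's two entry points: when the transaction's containing block is unknown, the status is derived from its reference block (pending vs. expired); when the block is known, it is derived from the block height and execution flag (finalized, executed, or sealed). A compact sketch of that pattern; the `statusFor` helper is hypothetical:

```go
package example

import (
	txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status"
	"github.com/onflow/flow-go/model/flow"
)

// statusFor derives a transaction status the same way the retrier does:
// no containing block -> pending/expired based on the reference block;
// containing block known -> finalized/executed/sealed based on its height.
func statusFor(
	deriver *txstatus.TxStatusDeriver,
	block *flow.Block,
	tx *flow.TransactionBody,
	executed bool,
) (flow.TransactionStatus, error) {
	if block == nil {
		return deriver.DeriveUnknownTransactionStatus(tx.ReferenceBlockID)
	}
	return deriver.DeriveTransactionStatus(block.Header.Height, executed)
}
```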
+func isExpired(refHeight, compareToHeight uint64) bool { + if compareToHeight <= refHeight { + return false + } + return compareToHeight-refHeight > flow.DefaultTransactionExpiry +} diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/transactions/stream/stream_backend.go similarity index 78% rename from engine/access/rpc/backend/backend_stream_transactions.go rename to engine/access/rpc/backend/transactions/stream/stream_backend.go index 2b4de88607a..65f029a8d4d 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/transactions/stream/stream_backend.go @@ -1,4 +1,4 @@ -package backend +package stream import ( "context" @@ -12,11 +12,16 @@ import ( "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow-go/access" + txprovider "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/access/subscription/tracker" accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" ) // TransactionExpiryForUnknownStatus defines the number of blocks after which @@ -26,15 +31,50 @@ const TransactionExpiryForUnknownStatus = flow.DefaultTransactionExpiry // sendTransaction defines a function type for sending a transaction. type sendTransaction func(ctx context.Context, tx *flow.TransactionBody) error -// backendSubscribeTransactions manages transaction subscriptions for monitoring transaction statuses. +// TransactionStream manages transaction subscriptions for monitoring transaction statuses. // It provides functionalities to send transactions, subscribe to transaction status updates, // and handle subscription lifecycles. -type backendSubscribeTransactions struct { +type TransactionStream struct { log zerolog.Logger - backendTransactions *backendTransactions + state protocol.State subscriptionHandler *subscription.SubscriptionHandler blockTracker tracker.BlockTracker sendTransaction sendTransaction + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + txProvider *txprovider.FailoverTransactionProvider + txStatusDeriver *txstatus.TxStatusDeriver +} + +var _ access.TransactionStreamAPI = (*TransactionStream)(nil) + +func NewTransactionStreamBackend( + log zerolog.Logger, + state protocol.State, + subscriptionHandler *subscription.SubscriptionHandler, + blockTracker tracker.BlockTracker, + sendTransaction sendTransaction, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + txProvider *txprovider.FailoverTransactionProvider, + txStatusDeriver *txstatus.TxStatusDeriver, +) *TransactionStream { + return &TransactionStream{ + log: log, + state: state, + subscriptionHandler: subscriptionHandler, + blockTracker: blockTracker, + sendTransaction: sendTransaction, + blocks: blocks, + collections: collections, + transactions: transactions, + txProvider: txProvider, + txStatusDeriver: txStatusDeriver, + } } // SendAndSubscribeTransactionStatuses sends a transaction and subscribes to its status updates. @@ -49,17 +89,17 @@ type backendSubscribeTransactions struct { // - requiredEventEncodingVersion: The version of event encoding required for the subscription. 
// // If the transaction cannot be sent, the subscription will fail and return a failed subscription. -func (b *backendSubscribeTransactions) SendAndSubscribeTransactionStatuses( +func (t *TransactionStream) SendAndSubscribeTransactionStatuses( ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion, ) subscription.Subscription { - if err := b.sendTransaction(ctx, tx); err != nil { - b.log.Debug().Err(err).Str("tx_id", tx.ID().String()).Msg("failed to send transaction") + if err := t.sendTransaction(ctx, tx); err != nil { + t.log.Debug().Err(err).Str("tx_id", tx.ID().String()).Msg("failed to send transaction") return subscription.NewFailedSubscription(err, "failed to send transaction") } - return b.createSubscription(ctx, tx.ID(), tx.ReferenceBlockID, tx.ReferenceBlockID, requiredEventEncodingVersion) + return t.createSubscription(ctx, tx.ID(), tx.ReferenceBlockID, tx.ReferenceBlockID, requiredEventEncodingVersion) } // SubscribeTransactionStatuses subscribes to status updates for a given transaction ID. @@ -72,19 +112,19 @@ func (b *backendSubscribeTransactions) SendAndSubscribeTransactionStatuses( // - ctx: The context to manage the subscription's lifecycle, including cancellation. // - txID: The unique identifier of the transaction to monitor. // - requiredEventEncodingVersion: The version of event encoding required for the subscription. -func (b *backendSubscribeTransactions) SubscribeTransactionStatuses( +func (t *TransactionStream) SubscribeTransactionStatuses( ctx context.Context, txID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion, ) subscription.Subscription { - header, err := b.backendTransactions.state.Sealed().Head() + header, err := t.state.Sealed().Head() if err != nil { // throw the exception as the node must have the current sealed block in storage irrecoverable.Throw(ctx, fmt.Errorf("failed to lookup sealed block: %w", err)) return subscription.NewFailedSubscription(err, "failed to lookup sealed block") } - return b.createSubscription(ctx, txID, header.ID(), flow.ZeroID, requiredEventEncodingVersion) + return t.createSubscription(ctx, txID, header.ID(), flow.ZeroID, requiredEventEncodingVersion) } // createSubscription initializes a transaction subscription for monitoring status updates. @@ -103,7 +143,7 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses( // - subscription.Subscription: A subscription for monitoring transaction status updates. // // If the start height cannot be determined or current transaction state cannot be determined, a failed subscription is returned. -func (b *backendSubscribeTransactions) createSubscription( +func (t *TransactionStream) createSubscription( ctx context.Context, txID flow.Identifier, startBlockID flow.Identifier, @@ -111,26 +151,35 @@ func (b *backendSubscribeTransactions) createSubscription( requiredEventEncodingVersion entities.EventEncodingVersion, ) subscription.Subscription { // Determine the height of the block to start the subscription from. 
- startHeight, err := b.blockTracker.GetStartHeightFromBlockID(startBlockID) + startHeight, err := t.blockTracker.GetStartHeightFromBlockID(startBlockID) if err != nil { - b.log.Debug().Err(err).Str("block_id", startBlockID.String()).Msg("failed to get start height") + t.log.Debug().Err(err).Str("block_id", startBlockID.String()).Msg("failed to get start height") return subscription.NewFailedSubscription(err, "failed to get start height") } - txInfo := newTransactionSubscriptionMetadata(b.backendTransactions, txID, referenceBlockID, requiredEventEncodingVersion) + txInfo := NewTransactionMetadata( + t.blocks, + t.collections, + t.transactions, + txID, + referenceBlockID, + requiredEventEncodingVersion, + t.txProvider, + t.txStatusDeriver, + ) - return b.subscriptionHandler.Subscribe(ctx, startHeight, b.getTransactionStatusResponse(txInfo, startHeight)) + return t.subscriptionHandler.Subscribe(ctx, startHeight, t.getTransactionStatusResponse(txInfo, startHeight)) } // getTransactionStatusResponse returns a callback function that produces transaction status // subscription responses based on new blocks. // The returned callback is not concurrency-safe -func (b *backendSubscribeTransactions) getTransactionStatusResponse( - txInfo *transactionSubscriptionMetadata, +func (t *TransactionStream) getTransactionStatusResponse( + txInfo *TransactionMetadata, startHeight uint64, ) func(context.Context, uint64) (interface{}, error) { return func(ctx context.Context, height uint64) (interface{}, error) { - err := b.checkBlockReady(height) + err := t.checkBlockReady(height) if err != nil { return nil, err } @@ -175,9 +224,9 @@ func hasReachedUnknownStatusLimit(height, startHeight uint64, status flow.Transa // checkBlockReady checks if the given block height is valid and available based on the expected block status. // Expected errors during normal operation: // - [subscription.ErrBlockNotReady]: block for the given block height is not available. 
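From the caller's side the stream is simply a subscription whose channel yields one update per processed height until it terminates. A minimal consumer sketch, assuming the `stream` package path introduced by this rename; the helper and the way the payload is handled are illustrative only:

```go
package example

import (
	"context"

	"github.com/onflow/flow/protobuf/go/flow/entities"

	txstream "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/stream"
	"github.com/onflow/flow-go/model/flow"
)

// watchTransaction sends a transaction and drains its status updates until the
// subscription's channel is closed.
func watchTransaction(ctx context.Context, ts *txstream.TransactionStream, tx *flow.TransactionBody) {
	sub := ts.SendAndSubscribeTransactionStatuses(ctx, tx, entities.EventEncodingVersion_CCF_V0)
	for msg := range sub.Channel() {
		// each message reports the transaction's status progression for a newly
		// processed block height (pending -> finalized -> executed -> sealed)
		_ = msg
	}
}
```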
-func (b *backendSubscribeTransactions) checkBlockReady(height uint64) error { +func (t *TransactionStream) checkBlockReady(height uint64) error { // Get the highest available finalized block height - highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) + highestHeight, err := t.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) if err != nil { return fmt.Errorf("could not get highest height for block %d: %w", height, err) } diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/transactions/stream/stream_backend_test.go similarity index 71% rename from engine/access/rpc/backend/backend_stream_transactions_test.go rename to engine/access/rpc/backend/transactions/stream/stream_backend_test.go index 7d5d7eef860..6468a6fb472 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ b/engine/access/rpc/backend/transactions/stream/stream_backend_test.go @@ -1,4 +1,4 @@ -package backend +package stream import ( "context" @@ -8,6 +8,7 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru/v2" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -21,19 +22,27 @@ import ( accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow-go/access/validator" + validatormock "github.com/onflow/flow-go/access/validator/mock" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/index" access "github.com/onflow/flow-go/engine/access/mock" - backendmock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" "github.com/onflow/flow-go/engine/access/subscription" trackermock "github.com/onflow/flow-go/engine/access/subscription/tracker/mock" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm/blueprints" accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/counters" + execmock "github.com/onflow/flow-go/module/execution/mock" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" @@ -47,8 +56,8 @@ import ( "github.com/onflow/flow-go/utils/unittest/mocks" ) -// TransactionStatusSuite represents a suite for testing transaction status-related functionality in the Flow blockchain. -type TransactionStatusSuite struct { +// TransactionStreamSuite represents a suite for testing transaction status-related functionality in the Flow blockchain. 
+type TransactionStreamSuite struct { suite.Suite state *protocol.State @@ -73,10 +82,12 @@ type TransactionStatusSuite struct { archiveClient *access.AccessAPIClient connectionFactory *connectionmock.ConnectionFactory - communicator *backendmock.Communicator - blockTracker *trackermock.BlockTracker - reporter *syncmock.IndexReporter - indexReporter *index.Reporter + + blockTracker *trackermock.BlockTracker + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + eventIndex *index.EventsIndex + txResultIndex *index.TransactionResultsIndex chainID flow.ChainID @@ -87,19 +98,24 @@ type TransactionStatusSuite struct { blockMap map[uint64]*flow.Block - backend *Backend + txStreamBackend *TransactionStream db *badger.DB dbDir string lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + + systemTx *flow.TransactionBody + + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList } func TestTransactionStatusSuite(t *testing.T) { - suite.Run(t, new(TransactionStatusSuite)) + suite.Run(t, new(TransactionStreamSuite)) } -// SetupTest initializes the test dependencies, configurations, and mock objects for TransactionStatusSuite tests. -func (s *TransactionStatusSuite) SetupTest() { +// SetupTest initializes the test dependencies, configurations, and mock objects for TransactionStreamSuite tests. +func (s *TransactionStreamSuite) SetupTest() { s.log = zerolog.New(zerolog.NewConsoleWriter()) s.state = protocol.NewState(s.T()) s.sealedSnapshot = protocol.NewSnapshot(s.T()) @@ -122,35 +138,51 @@ func (s *TransactionStatusSuite) SetupTest() { s.chainID = flow.Testnet s.historicalAccessClient = access.NewAccessAPIClient(s.T()) s.connectionFactory = connectionmock.NewConnectionFactory(s.T()) - s.communicator = backendmock.NewCommunicator(s.T()) s.broadcaster = engine.NewBroadcaster() s.blockTracker = trackermock.NewBlockTracker(s.T()) s.reporter = syncmock.NewIndexReporter(s.T()) s.indexReporter = index.NewReporter() err := s.indexReporter.Initialize(s.reporter) require.NoError(s.T(), err) + s.eventIndex = index.NewEventsIndex(s.indexReporter, s.events) + s.txResultIndex = index.NewTransactionResultsIndex(s.indexReporter, s.transactionResults) + + s.systemTx, err = blueprints.SystemChunkTransaction(s.chainID.Chain()) + s.Require().NoError(err) + + s.fixedExecutionNodeIDs = nil + s.preferredExecutionNodeIDs = nil s.initializeBackend() } // TearDownTest cleans up the db -func (s *TransactionStatusSuite) TearDownTest() { +func (s *TransactionStreamSuite) TearDownTest() { err := os.RemoveAll(s.dbDir) s.Require().NoError(err) } -// initializeBackend sets up and initializes the backend with required dependencies, mocks, and configurations for testing. -func (s *TransactionStatusSuite) initializeBackend() { - s.transactions.On("Store", mock.Anything).Return(nil).Maybe() +// initializeBackend sets up and initializes the txStreamBackend with required dependencies, mocks, and configurations for testing. +func (s *TransactionStreamSuite) initializeBackend() { + s.transactions. + On("Store", mock.Anything). + Return(nil). + Maybe() + + s.execClient. + On("GetTransactionResult", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.NotFound, "not found")). + Maybe() - s.execClient.On("GetTransactionResult", mock.Anything, mock.Anything).Return(nil, status.Error(codes.NotFound, "not found")).Maybe() - s.connectionFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mocks.MockCloser{}, nil).Maybe() + s.connectionFactory. 
+ On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil). + Maybe() - s.colClient.On( - "SendTransaction", - mock.Anything, - mock.Anything, - ).Return(&accessproto.SendTransactionResponse{}, nil).Maybe() + s.colClient. + On("SendTransaction", mock.Anything, mock.Anything). + Return(&accessproto.SendTransactionResponse{}, nil). + Maybe() // generate blockCount consecutive blocks with associated seal, result and execution data s.rootBlock = unittest.BlockFixture() @@ -179,54 +211,138 @@ func (s *TransactionStatusSuite) initializeBackend() { s.finalizedBlock.Header.Height: s.finalizedBlock, } - backendParams := s.backendParams() - s.backend, err = New(backendParams) - require.NoError(s.T(), err) -} + txStatusDeriver := txstatus.NewTxStatusDeriver( + s.state, + s.lastFullBlockHeight, + ) + + nodeCommunicator := node_communicator.NewNodeCommunicator(false) + + execNodeProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + s.preferredExecutionNodeIDs, + s.fixedExecutionNodeIDs, + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + nil, + s.txResultIndex, + s.connectionFactory, + nodeCommunicator, + execNodeProvider, + ) + + localTxProvider := provider.NewLocalTransactionProvider( + s.state, + s.collections, + s.blocks, + s.eventIndex, + s.txResultIndex, + errorMessageProvider, + s.systemTx.ID(), + txStatusDeriver, + ) + + execNodeTxProvider := provider.NewENTransactionProvider( + s.log, + s.state, + s.collections, + s.connectionFactory, + nodeCommunicator, + execNodeProvider, + txStatusDeriver, + s.systemTx.ID(), + s.systemTx, + ) + + txProvider := provider.NewFailoverTransactionProvider(localTxProvider, execNodeTxProvider) + + subscriptionHandler := subscription.NewSubscriptionHandler( + s.log, + s.broadcaster, + subscription.DefaultSendTimeout, + subscription.DefaultResponseLimit, + subscription.DefaultSendBufferSize, + ) -// backendParams returns the Params configuration for the backend. -func (s *TransactionStatusSuite) backendParams() Params { - return Params{ - State: s.state, - Blocks: s.blocks, - Headers: s.headers, - Collections: s.collections, - Transactions: s.transactions, - ExecutionReceipts: s.receipts, - ExecutionResults: s.results, - ChainID: s.chainID, - CollectionRPC: s.colClient, - MaxHeightRange: DefaultMaxHeightRange, - SnapshotHistoryLimit: DefaultSnapshotHistoryLimit, - Communicator: NewNodeCommunicator(false), - AccessMetrics: metrics.NewNoopCollector(), - Log: s.log, - BlockTracker: s.blockTracker, - SubscriptionHandler: subscription.NewSubscriptionHandler( - s.log, - s.broadcaster, - subscription.DefaultSendTimeout, - subscription.DefaultResponseLimit, - subscription.DefaultSendBufferSize, - ), - TxResultsIndex: index.NewTransactionResultsIndex(s.indexReporter, s.transactionResults), - EventQueryMode: IndexQueryModeLocalOnly, - TxResultQueryMode: IndexQueryModeLocalOnly, - EventsIndex: index.NewEventsIndex(s.indexReporter, s.events), - LastFullBlockHeight: s.lastFullBlockHeight, - ExecNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( - s.log, - s.state, - s.receipts, - nil, - nil, - ), - ConnFactory: s.connectionFactory, + validatorBlocks := validatormock.NewBlocks(s.T()) + validatorBlocks. + On("HeaderByID", mock.Anything). + Return(s.finalizedBlock.Header, nil). + Maybe() // used for some tests + + validatorBlocks. + On("FinalizedHeader", mock.Anything). + Return(s.finalizedBlock.Header, nil). 
+ Maybe() // used for some tests + + txValidator, err := validator.NewTransactionValidator( + validatorBlocks, + s.chainID.Chain(), + metrics.NewNoopCollector(), + validator.TransactionValidationOptions{ + MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, + MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, + }, + execmock.NewScriptExecutor(s.T()), + ) + s.Require().NoError(err) + + txResCache, err := lru.New[flow.Identifier, *accessmodel.TransactionResult](10) + s.Require().NoError(err) + + client := access.NewAccessAPIClient(s.T()) + client. + On("SendTransaction", mock.Anything, mock.Anything). + Return(&accessproto.SendTransactionResponse{}, nil). + Maybe() // used for some tests + + txParams := transactions.Params{ + Log: s.log, + Metrics: metrics.NewNoopCollector(), + State: s.state, + ChainID: s.chainID, + SystemTxID: s.systemTx.ID(), + SystemTx: s.systemTx, + StaticCollectionRPCClient: client, + HistoricalAccessNodeClients: nil, + NodeCommunicator: nodeCommunicator, + ConnFactory: s.connectionFactory, + EnableRetries: false, + NodeProvider: execNodeProvider, + Blocks: s.blocks, + Collections: s.collections, + Transactions: s.transactions, + TxErrorMessageProvider: errorMessageProvider, + TxResultCache: txResCache, + TxProvider: txProvider, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: s.eventIndex, + TxResultsIndex: s.txResultIndex, } + txBackend, err := transactions.NewTransactionsBackend(txParams) + s.Require().NoError(err) + + s.txStreamBackend = NewTransactionStreamBackend( + s.log, + s.state, + subscriptionHandler, + s.blockTracker, + txBackend.SendTransaction, + s.blocks, + s.collections, + s.transactions, + txProvider, + txStatusDeriver, + ) } -// initializeMainMockInstructions sets up the main mock behaviors for components used in TransactionStatusSuite tests. -func (s *TransactionStatusSuite) initializeMainMockInstructions() { +// initializeMainMockInstructions sets up the main mock behaviors for components used in TransactionStreamSuite tests. +func (s *TransactionStreamSuite) initializeMainMockInstructions() { s.transactions.On("Store", mock.Anything).Return(nil).Maybe() s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(mocks.StorageMapGetter(s.blockMap)).Maybe() @@ -282,7 +398,7 @@ func (s *TransactionStatusSuite) initializeMainMockInstructions() { } // initializeHappyCaseMockInstructions sets up mock behaviors for a happy-case scenario in transaction status testing. 
-func (s *TransactionStatusSuite) initializeHappyCaseMockInstructions() { +func (s *TransactionStreamSuite) initializeHappyCaseMockInstructions() { s.initializeMainMockInstructions() s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Header.Height, nil).Maybe() @@ -310,7 +426,7 @@ func (s *TransactionStatusSuite) initializeHappyCaseMockInstructions() { } // createSendTransaction generate sent transaction with ref block of the current finalized block -func (s *TransactionStatusSuite) createSendTransaction() flow.Transaction { +func (s *TransactionStreamSuite) createSendTransaction() flow.Transaction { transaction := unittest.TransactionFixture() transaction.SetReferenceBlockID(s.finalizedBlock.ID()) s.transactions.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(&transaction.TransactionBody, nil).Maybe() @@ -318,7 +434,7 @@ func (s *TransactionStatusSuite) createSendTransaction() flow.Transaction { } // addNewFinalizedBlock sets up a new finalized block using the provided parent header and options, and optionally notifies via broadcasting. -func (s *TransactionStatusSuite) addNewFinalizedBlock(parent *flow.Header, notify bool, options ...func(*flow.Block)) { +func (s *TransactionStreamSuite) addNewFinalizedBlock(parent *flow.Header, notify bool, options ...func(*flow.Block)) { s.finalizedBlock = unittest.BlockWithParentFixture(parent) for _, option := range options { option(s.finalizedBlock) @@ -331,24 +447,24 @@ func (s *TransactionStatusSuite) addNewFinalizedBlock(parent *flow.Header, notif } } -func (s *TransactionStatusSuite) mockTransactionResult(transactionID *flow.Identifier, hasTransactionResultInStorage *bool) { - s.transactionResults.On( - "ByBlockIDTransactionID", - mock.AnythingOfType("flow.Identifier"), - mock.AnythingOfType("flow.Identifier"), - ).Return(func(blockID, txID flow.Identifier) (*flow.LightTransactionResult, error) { - if *hasTransactionResultInStorage { - return &flow.LightTransactionResult{ - TransactionID: *transactionID, - Failed: false, - ComputationUsed: 0, - }, nil - } - return nil, storage.ErrNotFound - }) +func (s *TransactionStreamSuite) mockTransactionResult(transactionID *flow.Identifier, hasTransactionResultInStorage *bool) { + s.transactionResults. + On("ByBlockIDTransactionID", mock.Anything, mock.Anything). 
-func (s *TransactionStatusSuite) mockTransactionResult(transactionID *flow.Identifier, hasTransactionResultInStorage *bool) {
-	s.transactionResults.On(
-		"ByBlockIDTransactionID",
-		mock.AnythingOfType("flow.Identifier"),
-		mock.AnythingOfType("flow.Identifier"),
-	).Return(func(blockID, txID flow.Identifier) (*flow.LightTransactionResult, error) {
-		if *hasTransactionResultInStorage {
-			return &flow.LightTransactionResult{
-				TransactionID:   *transactionID,
-				Failed:          false,
-				ComputationUsed: 0,
-			}, nil
-		}
-		return nil, storage.ErrNotFound
-	})
+func (s *TransactionStreamSuite) mockTransactionResult(transactionID *flow.Identifier, hasTransactionResultInStorage *bool) {
+	s.transactionResults.
+		On("ByBlockIDTransactionID", mock.Anything, mock.Anything).
+		Return(
+			func(blockID, txID flow.Identifier) (*flow.LightTransactionResult, error) {
+				if *hasTransactionResultInStorage {
+					return &flow.LightTransactionResult{
+						TransactionID:   *transactionID,
+						Failed:          false,
+						ComputationUsed: 0,
+					}, nil
+				}
+				return nil, storage.ErrNotFound
+			},
+		)
}

-func (s *TransactionStatusSuite) addBlockWithTransaction(transaction *flow.Transaction) {
+func (s *TransactionStreamSuite) addBlockWithTransaction(transaction *flow.Transaction) {
	col := flow.CollectionFromTransactions([]*flow.Transaction{transaction})
	colID := col.ID()
	guarantee := col.Guarantee()
@@ -364,7 +480,7 @@ func (s *TransactionStatusSuite) addBlockWithTransaction(transaction *flow.Trans

// checkNewSubscriptionMessage is a shared helper that reads the next message from the subscription channel, converts it to transaction results,
// and checks the reported transaction statuses for correctness
-func (s *TransactionStatusSuite) checkNewSubscriptionMessage(sub subscription.Subscription, txId flow.Identifier, expectedTxStatuses []flow.TransactionStatus) {
+func (s *TransactionStreamSuite) checkNewSubscriptionMessage(sub subscription.Subscription, txId flow.Identifier, expectedTxStatuses []flow.TransactionStatus) {
	unittest.RequireReturnsBefore(s.T(), func() {
		v, ok := <-sub.Channel()
		require.True(s.T(), ok,
@@ -385,7 +501,7 @@ func (s *TransactionStatusSuite) checkNewSubscriptionMessage(sub subscription.Su
}

// checkGracefulShutdown ensures the provided subscription shuts down gracefully within a specified timeout duration.
-func (s *TransactionStatusSuite) checkGracefulShutdown(sub subscription.Subscription) {
+func (s *TransactionStreamSuite) checkGracefulShutdown(sub subscription.Subscription) {
	// Ensure subscription shuts down gracefully
	unittest.RequireReturnsBefore(s.T(), func() {
		<-sub.Channel()
@@ -395,7 +511,7 @@ func (s *TransactionStatusSuite) checkGracefulShutdown(sub subscription.Subscrip

// TestSendAndSubscribeTransactionStatusHappyCase tests the functionality of the SubscribeTransactionStatusesFromStartBlockID method in the Backend.
// It covers the emulation of transaction stages from pending to sealed, and receiving status updates.
-func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusHappyCase() {
+func (s *TransactionStreamSuite) TestSendAndSubscribeTransactionStatusHappyCase() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
@@ -411,7 +527,7 @@ func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusHappyCase(
	s.mockTransactionResult(&txId, &hasTransactionResultInStorage)

	// 1. Subscribe to transaction status and receive the first message with pending status
-	sub := s.backend.SendAndSubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0)
+	sub := s.txStreamBackend.SendAndSubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0)
	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending})

	// 2.
Make transaction reference block sealed, and add a new finalized block that includes the transaction @@ -438,7 +554,7 @@ func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusHappyCase( // TestSendAndSubscribeTransactionStatusExpired tests the functionality of the SubscribeTransactionStatusesFromStartBlockID method in the Backend // when transaction become expired -func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusExpired() { +func (s *TransactionStreamSuite) TestSendAndSubscribeTransactionStatusExpired() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -461,7 +577,7 @@ func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusExpired() s.collections.On("LightByTransactionID", txId).Return(nil, storage.ErrNotFound) // Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SendAndSubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SendAndSubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending}) // Generate 600 blocks without transaction included and check, that transaction still pending @@ -485,7 +601,7 @@ func (s *TransactionStatusSuite) TestSendAndSubscribeTransactionStatusExpired() } // TestSubscribeTransactionStatusWithCurrentPending verifies the subscription behavior for a transaction starting as pending. -func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentPending() { +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentPending() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -498,7 +614,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentPendin hasTransactionResultInStorage := false s.mockTransactionResult(&txId, &hasTransactionResultInStorage) - sub := s.backend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending}) s.addBlockWithTransaction(&transaction) @@ -519,7 +635,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentPendin } // TestSubscribeTransactionStatusWithCurrentFinalized verifies the subscription behavior for a transaction starting as finalized. 
-func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentFinalized() { +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentFinalized() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -533,7 +649,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentFinali s.addBlockWithTransaction(&transaction) - sub := s.backend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending, flow.TransactionStatusFinalized}) hasTransactionResultInStorage = true @@ -551,7 +667,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentFinali } // TestSubscribeTransactionStatusWithCurrentExecuted verifies the subscription behavior for a transaction starting as executed. -func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentExecuted() { +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentExecuted() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -569,8 +685,15 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentExecut // init transaction result for storage hasTransactionResultInStorage = true s.addNewFinalizedBlock(s.finalizedBlock.Header, true) - sub := s.backend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) - s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending, flow.TransactionStatusFinalized, flow.TransactionStatusExecuted}) + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + s.checkNewSubscriptionMessage( + sub, + txId, + []flow.TransactionStatus{ + flow.TransactionStatusPending, + flow.TransactionStatusFinalized, + flow.TransactionStatusExecuted, + }) // 4. Make the transaction block sealed, and add a new finalized block s.sealedBlock = s.finalizedBlock @@ -585,7 +708,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentExecut } // TestSubscribeTransactionStatusWithCurrentSealed verifies the subscription behavior for a transaction starting as sealed. -func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentSealed() { +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentSealed() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -606,7 +729,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentSealed s.sealedBlock = s.finalizedBlock s.addNewFinalizedBlock(s.sealedBlock.Header, true) - sub := s.backend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) s.checkNewSubscriptionMessage( sub, @@ -628,7 +751,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusWithCurrentSealed // TestSubscribeTransactionStatusFailedSubscription verifies the behavior of subscription when transaction status fails. // Ensures failure scenarios are handled correctly, such as missing sealed header, start height, or transaction by ID. 
-func (s *TransactionStatusSuite) TestSubscribeTransactionStatusFailedSubscription() { +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusFailedSubscription() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -645,7 +768,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusFailedSubscriptio signalerCtx := irrecoverable.WithSignalerContext(ctx, irrecoverable.NewMockSignalerContextExpectError(s.T(), ctx, fmt.Errorf("failed to lookup sealed block: %w", expectedError))) - sub := s.backend.SubscribeTransactionStatuses(signalerCtx, txId, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SubscribeTransactionStatuses(signalerCtx, txId, entities.EventEncodingVersion_CCF_V0) s.Assert().ErrorContains(sub.Err(), fmt.Errorf("failed to lookup sealed block: %w", expectedError).Error()) }) @@ -657,7 +780,7 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusFailedSubscriptio expectedError := storage.ErrNotFound s.blockTracker.On("GetStartHeightFromBlockID", s.sealedBlock.ID()).Return(uint64(0), expectedError).Once() - sub := s.backend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) s.Assert().ErrorContains(sub.Err(), expectedError.Error()) }) } diff --git a/engine/access/rpc/backend/transaction_subscription_metadata.go b/engine/access/rpc/backend/transactions/stream/transaction_metadata.go similarity index 58% rename from engine/access/rpc/backend/transaction_subscription_metadata.go rename to engine/access/rpc/backend/transactions/stream/transaction_metadata.go index 9e48ba7cd1b..562372b1443 100644 --- a/engine/access/rpc/backend/transaction_subscription_metadata.go +++ b/engine/access/rpc/backend/transactions/stream/transaction_metadata.go @@ -1,4 +1,4 @@ -package backend +package stream import ( "context" @@ -8,6 +8,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + txprovider "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/common/rpc" accessmodel "github.com/onflow/flow-go/model/access" @@ -19,50 +21,58 @@ import ( "github.com/onflow/flow/protobuf/go/flow/entities" ) -// transactionSubscriptionMetadata manages the state of a transaction subscription. +// TransactionMetadata manages the state of a transaction subscription. // // This struct contains metadata for tracking a transaction's progress, including // references to relevant blocks, collections, and transaction results. -type transactionSubscriptionMetadata struct { +type TransactionMetadata struct { blocks storage.Blocks collections storage.Collections transactions storage.Transactions - txResult *accessmodel.TransactionResult - txReferenceBlockID flow.Identifier - blockWithTx *flow.Header + txResult *accessmodel.TransactionResult + txReferenceBlockID flow.Identifier + blockWithTx *flow.Header //TODO: what is this??? + eventEncodingVersion entities.EventEncodingVersion - backendTransactions *backendTransactions + + txProvider *txprovider.FailoverTransactionProvider + txStatusDeriver *txstatus.TxStatusDeriver } -// newTransactionSubscriptionMetadata initializes a new metadata object for a transaction subscription. +// NewTransactionMetadata initializes a new metadata object for a transaction subscription. 
//
// This function constructs a transaction metadata object used for tracking the transaction's progress
// and maintaining its state throughout execution.
//
// Parameters:
// - ctx: Context for managing the lifecycle of the operation.
-// - backendTransactions: A reference to the backend transaction manager.
+// - blocks, collections, transactions: storages used to look up the transaction, its collection, and its block.
// - txID: The unique identifier of the transaction.
// - txReferenceBlockID: The ID of the transaction’s reference block.
// - eventEncodingVersion: The required version of event encoding.
+// - txProvider: provider used to fetch the transaction result once the containing block is known.
+// - txStatusDeriver: helper used to derive the transaction status.
//
// Returns:
-// - *transactionSubscriptionMetadata: The initialized transaction metadata object.
-func newTransactionSubscriptionMetadata(
-	backendTransactions *backendTransactions,
+// - *TransactionMetadata: The initialized transaction metadata object.
+func NewTransactionMetadata(
+	blocks storage.Blocks,
+	collections storage.Collections,
+	transactions storage.Transactions,
	txID flow.Identifier,
	txReferenceBlockID flow.Identifier,
	eventEncodingVersion entities.EventEncodingVersion,
-) *transactionSubscriptionMetadata {
-	return &transactionSubscriptionMetadata{
-		backendTransactions: backendTransactions,
+	txProvider *txprovider.FailoverTransactionProvider,
+	txStatusDeriver *txstatus.TxStatusDeriver,
+) *TransactionMetadata {
+	return &TransactionMetadata{
		txResult:             &accessmodel.TransactionResult{TransactionID: txID},
		eventEncodingVersion: eventEncodingVersion,
-		blocks:               backendTransactions.blocks,
-		collections:          backendTransactions.collections,
-		transactions:         backendTransactions.transactions,
+		blocks:               blocks,
+		collections:          collections,
+		transactions:         transactions,
		txReferenceBlockID:   txReferenceBlockID,
+		txProvider:           txProvider,
+		txStatusDeriver:      txStatusDeriver,
	}
}

@@ -77,17 +87,17 @@ func newTransactionSubscriptionMetadata(
//
// All other errors are considered as state corruption (fatal) or internal errors in the refreshing transaction result
// or when refreshing transaction status.
-func (tm *transactionSubscriptionMetadata) Refresh(ctx context.Context) error {
-	if err := tm.refreshCollection(); err != nil {
+func (t *TransactionMetadata) Refresh(ctx context.Context) error {
+	if err := t.refreshCollection(); err != nil {
		return err
	}
-	if err := tm.refreshBlock(); err != nil {
+	if err := t.refreshBlock(); err != nil {
		return err
	}
-	if err := tm.refreshTransactionResult(ctx); err != nil {
+	if err := t.refreshTransactionResult(ctx); err != nil {
		return err
	}
-	if err := tm.refreshStatus(ctx); err != nil {
+	if err := t.refreshStatus(ctx); err != nil {
		return err
	}
	return nil
@@ -101,17 +111,17 @@ func (tm *transactionSubscriptionMetadata) Refresh(ctx context.Context) error {
// - txReferenceBlockID: The reference block ID of the transaction.
//
// No errors expected during normal operations.
-func (tm *transactionSubscriptionMetadata) refreshTransactionReferenceBlockID() error { +func (t *TransactionMetadata) refreshTransactionReferenceBlockID() error { // Get referenceBlockID if it is not set - if tm.txReferenceBlockID != flow.ZeroID { + if t.txReferenceBlockID != flow.ZeroID { return nil } - tx, err := tm.transactions.ByID(tm.txResult.TransactionID) + tx, err := t.transactions.ByID(t.txResult.TransactionID) if err != nil { return fmt.Errorf("failed to lookup transaction by transaction ID: %w", err) } - tm.txReferenceBlockID = tx.ReferenceBlockID + t.txReferenceBlockID = tx.ReferenceBlockID return nil } @@ -121,20 +131,20 @@ func (tm *transactionSubscriptionMetadata) refreshTransactionReferenceBlockID() // - ctx: Context for managing the operation lifecycle. // // No errors expected during normal operations. -func (tm *transactionSubscriptionMetadata) refreshStatus(ctx context.Context) error { +func (t *TransactionMetadata) refreshStatus(ctx context.Context) error { var err error - if tm.blockWithTx == nil { - if err = tm.refreshTransactionReferenceBlockID(); err != nil { + if t.blockWithTx == nil { + if err = t.refreshTransactionReferenceBlockID(); err != nil { // transaction was not sent from this node, and it has not been indexed yet. if errors.Is(err, storage.ErrNotFound) { - tm.txResult.Status = flow.TransactionStatusUnknown + t.txResult.Status = flow.TransactionStatusUnknown return nil } return err } - tm.txResult.Status, err = tm.backendTransactions.DeriveUnknownTransactionStatus(tm.txReferenceBlockID) + t.txResult.Status, err = t.txStatusDeriver.DeriveUnknownTransactionStatus(t.txReferenceBlockID) if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { irrecoverable.Throw(ctx, err) @@ -146,7 +156,7 @@ func (tm *transactionSubscriptionMetadata) refreshStatus(ctx context.Context) er // When the transaction is included in an executed block, the `txResult` may be updated during `Refresh` // Recheck the status to ensure it's accurate. - tm.txResult.Status, err = tm.backendTransactions.DeriveTransactionStatus(tm.blockWithTx.Height, tm.txResult.IsExecuted()) + t.txResult.Status, err = t.txStatusDeriver.DeriveTransactionStatus(t.blockWithTx.Height, t.txResult.IsExecuted()) if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { irrecoverable.Throw(ctx, err) @@ -162,12 +172,12 @@ func (tm *transactionSubscriptionMetadata) refreshStatus(ctx context.Context) er // - [ErrBlockNotReady] if the block for collection ID is not found. // // All other errors should be treated as exceptions. 
-func (tm *transactionSubscriptionMetadata) refreshBlock() error { - if tm.txResult.CollectionID == flow.ZeroID || tm.blockWithTx != nil { +func (t *TransactionMetadata) refreshBlock() error { + if t.txResult.CollectionID == flow.ZeroID || t.blockWithTx != nil { return nil } - block, err := tm.blocks.ByCollectionID(tm.txResult.CollectionID) + block, err := t.blocks.ByCollectionID(t.txResult.CollectionID) if err != nil { if errors.Is(err, storage.ErrNotFound) { return subscription.ErrBlockNotReady @@ -176,9 +186,9 @@ func (tm *transactionSubscriptionMetadata) refreshBlock() error { return fmt.Errorf("failed to lookup block containing collection: %w", err) } - tm.blockWithTx = block.Header - tm.txResult.BlockID = block.ID() - tm.txResult.BlockHeight = block.Header.Height + t.blockWithTx = block.Header + t.txResult.BlockID = block.ID() + t.txResult.BlockHeight = block.Header.Height return nil } @@ -188,19 +198,19 @@ func (tm *transactionSubscriptionMetadata) refreshBlock() error { // - [ErrTransactionNotInBlock] if the transaction is not found in the block. // // All other errors should be treated as exceptions. -func (tm *transactionSubscriptionMetadata) refreshCollection() error { - if tm.txResult.CollectionID != flow.ZeroID { +func (t *TransactionMetadata) refreshCollection() error { + if t.txResult.CollectionID != flow.ZeroID { return nil } - collection, err := tm.collections.LightByTransactionID(tm.txResult.TransactionID) + collection, err := t.collections.LightByTransactionID(t.txResult.TransactionID) if err != nil { if errors.Is(err, storage.ErrNotFound) { return nil } return fmt.Errorf("failed to lookup collection containing tx: %w", err) } - tm.txResult.CollectionID = collection.ID() + t.txResult.CollectionID = collection.ID() return nil } @@ -213,38 +223,34 @@ func (tm *transactionSubscriptionMetadata) refreshCollection() error { // - [codes.NotFound] if the transaction result is unavailable. // // All other errors should be treated as exceptions. -func (tm *transactionSubscriptionMetadata) refreshTransactionResult(ctx context.Context) error { +func (t *TransactionMetadata) refreshTransactionResult(ctx context.Context) error { // skip check if we already have the result, or if we don't know which block it is in yet - if tm.blockWithTx == nil || tm.txResult.IsExecuted() { + if t.blockWithTx == nil || t.txResult.IsExecuted() { return nil } - // Trying to get transaction result from local storage - txResult, err := tm.backendTransactions.GetTransactionResultFromStorage(ctx, tm.blockWithTx, tm.txResult.TransactionID, tm.eventEncodingVersion) + txResult, err := t.txProvider.TransactionResult( + ctx, + t.blockWithTx, + t.txResult.TransactionID, + t.eventEncodingVersion, + ) if err != nil { - if status.Code(err) != codes.FailedPrecondition && - status.Code(err) != codes.OutOfRange && - status.Code(err) != codes.NotFound { - return fmt.Errorf("unexpected error while getting transaction result from storage: %w", err) + // TODO: I don't like the fact we propagate this error from txProvider. 
+ // Fix it during error handling polishing project + if status.Code(err) == codes.NotFound { + // No result yet, indicate that it has not been executed + return nil } - // If any error occurs with local storage - request transaction result from EN - txResult, err = tm.backendTransactions.GetTransactionResultFromExecutionNode(ctx, tm.blockWithTx, tm.txResult.TransactionID, tm.eventEncodingVersion) - if err != nil { - // if either the execution node reported no results - if status.Code(err) == codes.NotFound { - // No result yet, indicate that it has not been executed - return nil - } - return fmt.Errorf("failed to get transaction result from execution node: %w", err) - } + return fmt.Errorf("unexpected error while getting transaction result: %w", err) } // If transaction result was found, fully replace it in metadata. New transaction status already included in result. if txResult != nil { // Preserve the CollectionID to ensure it is not lost during the transaction result assignment. - txResult.CollectionID = tm.txResult.CollectionID - tm.txResult = txResult + txResult.CollectionID = t.txResult.CollectionID + t.txResult = txResult } return nil diff --git a/engine/access/rpc/backend/transactions/transactions.go b/engine/access/rpc/backend/transactions/transactions.go new file mode 100644 index 00000000000..22d99ece7d1 --- /dev/null +++ b/engine/access/rpc/backend/transactions/transactions.go @@ -0,0 +1,630 @@ +package transactions + +import ( + "context" + "errors" + "fmt" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// ErrTransactionNotInBlock represents an error indicating that the transaction is not found in the block. 
+var ErrTransactionNotInBlock = errors.New("transaction not in block") + +type Transactions struct { + log zerolog.Logger + metrics module.TransactionMetrics + + state protocol.State + chainID flow.ChainID + + systemTxID flow.Identifier + systemTx *flow.TransactionBody + + // RPC Clients & Network + collectionRPCClient accessproto.AccessAPIClient // RPC client tied to a fixed collection node + historicalAccessNodeClients []accessproto.AccessAPIClient + nodeCommunicator node_communicator.Communicator + connectionFactory connection.ConnectionFactory + retrier retrier.Retrier + + // Storages + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + txResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + + txValidator *validator.TransactionValidator + txProvider provider.TransactionProvider + txStatusDeriver *txstatus.TxStatusDeriver +} + +var _ access.TransactionsAPI = (*Transactions)(nil) + +type Params struct { + Log zerolog.Logger + Metrics module.TransactionMetrics + State protocol.State + ChainID flow.ChainID + SystemTxID flow.Identifier + SystemTx *flow.TransactionBody + StaticCollectionRPCClient accessproto.AccessAPIClient + HistoricalAccessNodeClients []accessproto.AccessAPIClient + NodeCommunicator node_communicator.Communicator + ConnFactory connection.ConnectionFactory + EnableRetries bool + NodeProvider *commonrpc.ExecutionNodeIdentitiesProvider + Blocks storage.Blocks + Collections storage.Collections + Transactions storage.Transactions + TxErrorMessageProvider error_messages.Provider + TxResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + TxProvider provider.TransactionProvider + TxValidator *validator.TransactionValidator + TxStatusDeriver *txstatus.TxStatusDeriver + EventsIndex *index.EventsIndex + TxResultsIndex *index.TransactionResultsIndex +} + +func NewTransactionsBackend(params Params) (*Transactions, error) { + txs := &Transactions{ + log: params.Log, + metrics: params.Metrics, + state: params.State, + chainID: params.ChainID, + systemTxID: params.SystemTxID, + systemTx: params.SystemTx, + collectionRPCClient: params.StaticCollectionRPCClient, + historicalAccessNodeClients: params.HistoricalAccessNodeClients, + nodeCommunicator: params.NodeCommunicator, + connectionFactory: params.ConnFactory, + blocks: params.Blocks, + collections: params.Collections, + transactions: params.Transactions, + txResultCache: params.TxResultCache, + txValidator: params.TxValidator, + txProvider: params.TxProvider, + txStatusDeriver: params.TxStatusDeriver, + } + + if params.EnableRetries { + txs.retrier = retrier.NewRetrier( + params.Log, + params.Blocks, + params.Collections, + txs, + params.TxStatusDeriver, + ) + } else { + txs.retrier = retrier.NewNoopRetrier() + } + + return txs, nil +} + +// SendTransaction forwards the transaction to the collection node +func (t *Transactions) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { + now := time.Now().UTC() + + err := t.txValidator.Validate(ctx, tx) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid transaction: %s", err.Error()) + } + + // send the transaction to the collection node if valid + err = t.trySendTransaction(ctx, tx) + if err != nil { + t.metrics.TransactionSubmissionFailed() + return rpc.ConvertError(err, "failed to send transaction to a collection node", codes.Internal) + } + + t.metrics.TransactionReceived(tx.ID(), now) + + // store the transaction locally + err = t.transactions.Store(tx) + if err != 
nil {
+		return status.Errorf(codes.Internal, "failed to store transaction: %v", err)
+	}
+
+	go t.registerTransactionForRetry(tx)
+
+	return nil
+}
+
+// trySendTransaction tries to send the transaction to a collection node
+func (t *Transactions) trySendTransaction(ctx context.Context, tx *flow.TransactionBody) error {
+	// if a collection node rpc client was provided at startup, just use that
+	if t.collectionRPCClient != nil {
+		return t.grpcTxSend(ctx, t.collectionRPCClient, tx)
+	}
+
+	// otherwise choose all collection nodes to try
+	collNodes, err := t.chooseCollectionNodes(tx.ID())
+	if err != nil {
+		return fmt.Errorf("failed to determine collection node for tx %x: %w", tx, err)
+	}
+
+	var sendError error
+	logAnyError := func() {
+		if sendError != nil {
+			t.log.Info().Err(err).Msg("failed to send transactions to collector nodes")
+		}
+	}
+	defer logAnyError()
+
+	// try sending the transaction to one of the chosen collection nodes
+	sendError = t.nodeCommunicator.CallAvailableNode(
+		collNodes,
+		func(node *flow.IdentitySkeleton) error {
+			err = t.sendTransactionToCollector(ctx, tx, node.Address)
+			if err != nil {
+				return err
+			}
+			return nil
+		},
+		nil,
+	)
+
+	return sendError
+}
+
+// chooseCollectionNodes returns the identities of the collection node cluster
+// responsible for the given tx
+func (t *Transactions) chooseCollectionNodes(txID flow.Identifier) (flow.IdentitySkeletonList, error) {
+	// retrieve the set of collector clusters
+	currentEpoch, err := t.state.Final().Epochs().Current()
+	if err != nil {
+		return nil, fmt.Errorf("could not get current epoch: %w", err)
+	}
+	clusters, err := currentEpoch.Clustering()
+	if err != nil {
+		return nil, fmt.Errorf("could not cluster collection nodes: %w", err)
+	}
+
+	// get the cluster responsible for the transaction
+	targetNodes, ok := clusters.ByTxID(txID)
+	if !ok {
+		return nil, fmt.Errorf("could not get local cluster by txID: %x", txID)
+	}
+
+	return targetNodes, nil
+}
+
+// sendTransactionToCollector sends the transaction to the given collection node via grpc
+func (t *Transactions) sendTransactionToCollector(
+	ctx context.Context,
+	tx *flow.TransactionBody,
+	collectionNodeAddr string,
+) error {
+	collectionRPC, closer, err := t.connectionFactory.GetAccessAPIClient(collectionNodeAddr, nil)
+	if err != nil {
+		return fmt.Errorf("failed to connect to collection node at %s: %w", collectionNodeAddr, err)
+	}
+	defer closer.Close()
+
+	err = t.grpcTxSend(ctx, collectionRPC, tx)
+	if err != nil {
+		return fmt.Errorf("failed to send transaction to collection node at %s: %w", collectionNodeAddr, err)
+	}
+	return nil
+}
+
+func (t *Transactions) grpcTxSend(
+	ctx context.Context,
+	client accessproto.AccessAPIClient,
+	tx *flow.TransactionBody,
+) error {
+	colReq := &accessproto.SendTransactionRequest{
+		Transaction: convert.TransactionToMessage(*tx),
+	}
+
+	clientDeadline := time.Now().Add(time.Duration(2) * time.Second)
+	ctx, cancel := context.WithDeadline(ctx, clientDeadline)
+	defer cancel()
+
+	_, err := client.SendTransaction(ctx, colReq)
+	return err
+}
+
+// SendRawTransaction sends a raw transaction to the collection node
+func (t *Transactions) SendRawTransaction(
+	ctx context.Context,
+	tx *flow.TransactionBody,
+) error {
+	// send the transaction to the collection node
+	return t.trySendTransaction(ctx, tx)
+}
+
+func (t *Transactions) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.TransactionBody, error) {
+	// look up transaction from
storage + tx, err := t.transactions.ByID(txID) + txErr := rpc.ConvertStorageError(err) + + if txErr != nil { + if status.Code(txErr) == codes.NotFound { + return t.getHistoricalTransaction(ctx, txID) + } + // Other Error trying to retrieve the transaction, return with err + return nil, txErr + } + + return tx, nil +} + +func (t *Transactions) GetTransactionsByBlockID( + _ context.Context, + blockID flow.Identifier, +) ([]*flow.TransactionBody, error) { + var transactions []*flow.TransactionBody + + // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID + block, err := t.blocks.ByID(blockID) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + for _, guarantee := range block.Payload.Guarantees { + collection, err := t.collections.ByID(guarantee.CollectionID) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + transactions = append(transactions, collection.Transactions...) + } + + transactions = append(transactions, t.systemTx) + + return transactions, nil +} + +func (t *Transactions) GetTransactionResult( + ctx context.Context, + txID flow.Identifier, + blockID flow.Identifier, + collectionID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + // look up transaction from storage + start := time.Now() + + tx, err := t.transactions.ByID(txID) + if err != nil { + txErr := rpc.ConvertStorageError(err) + if status.Code(txErr) != codes.NotFound { + return nil, txErr + } + + // Tx not found. If we have historical Sporks setup, lets look through those as well + if t.txResultCache != nil { + val, ok := t.txResultCache.Get(txID) + if ok { + return val, nil + } + } + historicalTxResult, err := t.getHistoricalTransactionResult(ctx, txID) + if err != nil { + // if tx not found in old access nodes either, then assume that the tx was submitted to a different AN + // and return status as unknown + txStatus := flow.TransactionStatusUnknown + result := &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: uint(txStatus), + } + if t.txResultCache != nil { + t.txResultCache.Add(txID, result) + } + return result, nil + } + + if t.txResultCache != nil { + t.txResultCache.Add(txID, historicalTxResult) + } + return historicalTxResult, nil + } + + block, err := t.retrieveBlock(blockID, collectionID, txID) + // an error occurred looking up the block or the requested block or collection was not found. + // If looking up the block based solely on the txID returns not found, then no error is + // returned since the block may not be finalized yet. + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + var blockHeight uint64 + var txResult *accessmodel.TransactionResult + // access node may not have the block if it hasn't yet been finalized, hence block can be nil at this point + if block != nil { + txResult, err = t.lookupTransactionResult(ctx, txID, block.Header, requiredEventEncodingVersion) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result", codes.Internal) + } + + // an additional check to ensure the correctness of the collection ID. + expectedCollectionID, err := t.lookupCollectionIDInBlock(block, txID) + if err != nil { + // if the collection has not been indexed yet, the lookup will return a not found error. 
+			// if the request included a blockID or collectionID in its search criteria, not found
+			// should result in an error because it's not possible to guarantee that the result found
+			// is the correct one.
+			if blockID != flow.ZeroID || collectionID != flow.ZeroID {
+				return nil, rpc.ConvertStorageError(err)
+			}
+		}
+
+		if collectionID == flow.ZeroID {
+			collectionID = expectedCollectionID
+		} else if collectionID != expectedCollectionID {
+			return nil, status.Error(codes.InvalidArgument, "transaction not found in provided collection")
+		}
+
+		blockID = block.ID()
+		blockHeight = block.Header.Height
+	}
+
+	// If there is still no transaction result, provide one based on available information.
+	if txResult == nil {
+		var txStatus flow.TransactionStatus
+		// Derive the status of the transaction.
+		if block == nil {
+			txStatus, err = t.txStatusDeriver.DeriveUnknownTransactionStatus(tx.ReferenceBlockID)
+		} else {
+			txStatus, err = t.txStatusDeriver.DeriveTransactionStatus(blockHeight, false)
+		}
+
+		if err != nil {
+			if !errors.Is(err, state.ErrUnknownSnapshotReference) {
+				irrecoverable.Throw(ctx, err)
+			}
+			return nil, rpc.ConvertStorageError(err)
+		}
+
+		txResult = &accessmodel.TransactionResult{
+			BlockID:       blockID,
+			BlockHeight:   blockHeight,
+			TransactionID: txID,
+			Status:        txStatus,
+			CollectionID:  collectionID,
+		}
+	} else {
+		txResult.CollectionID = collectionID
+	}
+
+	t.metrics.TransactionResultFetched(time.Since(start), len(tx.Script))
+
+	return txResult, nil
+}
+
+// lookupCollectionIDInBlock returns the collection ID based on the transaction ID. The lookup is performed in block
+// collections.
+func (t *Transactions) lookupCollectionIDInBlock(
+	block *flow.Block,
+	txID flow.Identifier,
+) (flow.Identifier, error) {
+	for _, guarantee := range block.Payload.Guarantees {
+		collectionID := guarantee.ID()
+		collection, err := t.collections.LightByID(collectionID)
+		if err != nil {
+			return flow.ZeroID, fmt.Errorf("failed to get collection %s in indexed block: %w", collectionID, err)
+		}
+		for _, collectionTxID := range collection.Transactions {
+			if collectionTxID == txID {
+				return collectionID, nil
+			}
+		}
+	}
+	return flow.ZeroID, ErrTransactionNotInBlock
+}
+
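Because the block and collection identifiers passed to `GetTransactionResult` are only hints, a caller that knows nothing beyond the transaction ID can pass `flow.ZeroID` for both and let the backend resolve the block through the lookup order documented for `retrieveBlock` below. A minimal usage sketch, assuming a backend already constructed via `NewTransactionsBackend`; the helper function and its wiring are illustrative, not part of this change:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow/protobuf/go/flow/entities"

	"github.com/onflow/flow-go/engine/access/rpc/backend/transactions"
	"github.com/onflow/flow-go/model/flow"
)

// printResultByTxID looks up a transaction result when only the transaction ID
// is known. Passing flow.ZeroID for the block and collection IDs lets the
// backend resolve them itself (block ID first, then collection ID, then a
// collection-by-transaction lookup).
func printResultByTxID(ctx context.Context, backend *transactions.Transactions, txID flow.Identifier) error {
	result, err := backend.GetTransactionResult(
		ctx,
		txID,
		flow.ZeroID, // no block hint
		flow.ZeroID, // no collection hint
		entities.EventEncodingVersion_JSON_CDC_V0,
	)
	if err != nil {
		return fmt.Errorf("could not get transaction result: %w", err)
	}

	fmt.Printf("tx %v: status %v in block %v\n", txID, result.Status, result.BlockID)
	return nil
}
```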
+// retrieveBlock returns a block based on the input arguments.
+// The block ID lookup has the highest priority, followed by the collection ID lookup.
+// If both are missing, the default lookup by transaction ID is performed.
+//
+// If looking up the block based solely on the txID returns not found, then no error is returned.
+//
+// Expected errors:
+// - storage.ErrNotFound if the requested block or collection was not found.
+func (t *Transactions) retrieveBlock(
+	blockID flow.Identifier,
+	collectionID flow.Identifier,
+	txID flow.Identifier,
+) (*flow.Block, error) {
+	if blockID != flow.ZeroID {
+		return t.blocks.ByID(blockID)
+	}
+
+	if collectionID != flow.ZeroID {
+		return t.blocks.ByCollectionID(collectionID)
+	}
+
+	// find the block for the transaction
+	block, err := t.lookupBlock(txID)
+
+	if err != nil && !errors.Is(err, storage.ErrNotFound) {
+		return nil, err
+	}
+
+	return block, nil
+}
+
+func (t *Transactions) GetTransactionResultsByBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]*accessmodel.TransactionResult, error) {
+	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.txProvider.TransactionResultsByBlockID(ctx, block, requiredEventEncodingVersion)
+}
+
+// GetTransactionResultByIndex returns the transaction result at the given index within an executed block.
+// For transactions in blocks that are only pending or finalized (not yet executed), an error is returned.
+func (t *Transactions) GetTransactionResultByIndex(
+	ctx context.Context,
+	blockID flow.Identifier,
+	index uint32,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.txProvider.TransactionResultByIndex(ctx, block, index, requiredEventEncodingVersion)
+}
+
+// GetSystemTransaction returns the system chunk transaction
+func (t *Transactions) GetSystemTransaction(_ context.Context, _ flow.Identifier) (*flow.TransactionBody, error) {
+	return t.systemTx, nil
+}
+
+// GetSystemTransactionResult returns the result of the system chunk transaction for the given block
+func (t *Transactions) GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) {
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.lookupTransactionResult(ctx, t.systemTxID, block.Header, requiredEventEncodingVersion)
+}
+
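The two system-transaction accessors above pair naturally: one returns the system chunk transaction body, the other its result for a specific block. A short usage sketch, assuming an already-constructed backend; the function name and surrounding wiring are illustrative only:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow/protobuf/go/flow/entities"

	"github.com/onflow/flow-go/engine/access/rpc/backend/transactions"
	"github.com/onflow/flow-go/model/flow"
)

// inspectSystemChunk fetches the system chunk transaction and its result for a
// given block, then prints a brief summary.
func inspectSystemChunk(ctx context.Context, backend *transactions.Transactions, blockID flow.Identifier) error {
	sysTx, err := backend.GetSystemTransaction(ctx, blockID)
	if err != nil {
		return fmt.Errorf("could not get system transaction: %w", err)
	}

	result, err := backend.GetSystemTransactionResult(ctx, blockID, entities.EventEncodingVersion_JSON_CDC_V0)
	if err != nil {
		return fmt.Errorf("could not get system transaction result: %w", err)
	}

	fmt.Printf("system tx %v in block %v: status %v, %d events\n",
		sysTx.ID(), blockID, result.Status, len(result.Events))
	return nil
}
```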
+// Error returns:
+// - `storage.ErrNotFound` - collection referenced by transaction or block by a collection has not been found.
+// - all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal).
+func (t *Transactions) lookupBlock(txID flow.Identifier) (*flow.Block, error) {
+	collection, err := t.collections.LightByTransactionID(txID)
+	if err != nil {
+		return nil, err
+	}
+
+	block, err := t.blocks.ByCollectionID(collection.ID())
+	if err != nil {
+		return nil, err
+	}
+
+	return block, nil
+}
+
+func (t *Transactions) lookupTransactionResult(
+	ctx context.Context,
+	txID flow.Identifier,
+	header *flow.Header,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	txResult, err := t.txProvider.TransactionResult(ctx, header, txID, requiredEventEncodingVersion)
+	if err != nil {
+		// if either the storage or execution node reported no results or there were not enough execution results
+		if status.Code(err) == codes.NotFound {
+			// No result yet, indicate that it has not been executed
+			return nil, nil
+		}
+		// Other error trying to retrieve the result, return with err
+		return nil, err
+	}
+
+	// considered executed as long as some result is returned, even if it's an error message
+	return txResult, nil
+}
+
+func (t *Transactions) getHistoricalTransaction(
+	ctx context.Context,
+	txID flow.Identifier,
+) (*flow.TransactionBody, error) {
+	for _, historicalNode := range t.historicalAccessNodeClients {
+		txResp, err := historicalNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: txID[:]})
+		if err == nil {
+			tx, err := convert.MessageToTransaction(txResp.Transaction, t.chainID.Chain())
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, "could not convert transaction: %v", err)
+			}
+
+			// Found on a historical node. Report
+			return &tx, nil
+		}
+		// Otherwise, if not found, just continue
+		if status.Code(err) == codes.NotFound {
+			continue
+		}
+		// TODO should we do something if the error isn't not found?
+	}
+	return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID)
+}
+
+func (t *Transactions) getHistoricalTransactionResult(
+	ctx context.Context,
+	txID flow.Identifier,
+) (*accessmodel.TransactionResult, error) {
+	for _, historicalNode := range t.historicalAccessNodeClients {
+		result, err := historicalNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{Id: txID[:]})
+		if err == nil {
+			// Found on a historical node. Report
+			if result.GetStatus() == entities.TransactionStatus_UNKNOWN {
+				// We've moved to returning Status UNKNOWN instead of an error with the NotFound status,
+				// Therefore we should continue and look at the next access node for answers.
+				continue
+			}
+
+			if result.GetStatus() == entities.TransactionStatus_PENDING {
+				// This is on a historical node. No transactions from it will ever be
+				// executed, therefore we should consider this expired
+				result.Status = entities.TransactionStatus_EXPIRED
+			}
+
+			return convert.MessageToTransactionResult(result), nil
+		}
+		// Otherwise, if not found, just continue
+		if status.Code(err) == codes.NotFound {
+			continue
+		}
+		// TODO should we do something if the error isn't not found?
+	}
+	return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID)
+}
+
+func (t *Transactions) registerTransactionForRetry(tx *flow.TransactionBody) {
+	referenceBlock, err := t.state.AtBlockID(tx.ReferenceBlockID).Head()
+	if err != nil {
+		return
+	}
+
+	t.retrier.RegisterTransaction(referenceBlock.Height, tx)
+}
+
+// ATTENTION: might be a source of problems in the future. We run this code on the finalization goroutine,
+// potentially lagging finalization events if operations take a long time.
+// We might need to move this logic on dedicated goroutine and provide a way to skip finalization events if they are delivered +// too often for this engine. An example of similar approach - https://github.com/onflow/flow-go/blob/10b0fcbf7e2031674c00f3cdd280f27bd1b16c47/engine/common/follower/compliance_engine.go#L201.. +// No errors expected during normal operations. +func (t *Transactions) ProcessFinalizedBlockHeight(height uint64) error { + return t.retrier.Retry(height) +} diff --git a/engine/access/rpc/backend/transactions/transactions_test.go b/engine/access/rpc/backend/transactions/transactions_test.go new file mode 100644 index 00000000000..d1f507ffa7b --- /dev/null +++ b/engine/access/rpc/backend/transactions/transactions_test.go @@ -0,0 +1,1222 @@ +package transactions + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "os" + "testing" + + "github.com/dgraph-io/badger/v2" + lru "github.com/hashicorp/golang-lru/v2" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/validator" + validatormock "github.com/onflow/flow-go/access/validator/mock" + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm/blueprints" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + execmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/metrics" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + "github.com/onflow/flow-go/state/protocol" + bprotocol "github.com/onflow/flow-go/state/protocol/badger" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/state/protocol/util" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/generator" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +const expectedErrorMsg = "expected test error" + +type Suite struct { + suite.Suite + + log zerolog.Logger + state *protocolmock.State + snapshot *protocolmock.Snapshot + + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts 
*storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + lightTxResults *storagemock.LightTransactionResults + events *storagemock.Events + txResultErrorMessages *storagemock.TransactionResultErrorMessages + txResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + + db *badger.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + + executionAPIClient *accessmock.ExecutionAPIClient + historicalAccessAPIClient *accessmock.AccessAPIClient + + connectionFactory *connectionmock.ConnectionFactory + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + eventsIndex *index.EventsIndex + txResultsIndex *index.TransactionResultsIndex + + errorMessageProvider error_messages.Provider + + chainID flow.ChainID + systemTx *flow.TransactionBody + + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList +} + +func TestTransactionsBackend(t *testing.T) { + suite.Run(t, new(Suite)) +} + +func (suite *Suite) SetupTest() { + suite.log = zerolog.New(zerolog.NewConsoleWriter()) + suite.snapshot = protocolmock.NewSnapshot(suite.T()) + + header := unittest.BlockHeaderFixture() + params := protocolmock.NewParams(suite.T()) + params.On("FinalizedRoot").Return(header, nil).Maybe() + params.On("SporkID").Return(unittest.IdentifierFixture(), nil).Maybe() + params.On("SporkRootBlockHeight").Return(header.Height, nil).Maybe() + params.On("SealedRoot").Return(header, nil).Maybe() + + suite.state = protocolmock.NewState(suite.T()) + suite.state.On("Params").Return(params).Maybe() + + suite.blocks = storagemock.NewBlocks(suite.T()) + suite.headers = storagemock.NewHeaders(suite.T()) + suite.transactions = storagemock.NewTransactions(suite.T()) + suite.collections = storagemock.NewCollections(suite.T()) + suite.receipts = storagemock.NewExecutionReceipts(suite.T()) + suite.results = storagemock.NewExecutionResults(suite.T()) + suite.txResultErrorMessages = storagemock.NewTransactionResultErrorMessages(suite.T()) + suite.executionAPIClient = accessmock.NewExecutionAPIClient(suite.T()) + suite.lightTxResults = storagemock.NewLightTransactionResults(suite.T()) + suite.events = storagemock.NewEvents(suite.T()) + suite.chainID = flow.Testnet + suite.historicalAccessAPIClient = accessmock.NewAccessAPIClient(suite.T()) + suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) + + txResCache, err := lru.New[flow.Identifier, *accessmodel.TransactionResult](10) + suite.Require().NoError(err) + suite.txResultCache = txResCache + + suite.reporter = syncmock.NewIndexReporter(suite.T()) + suite.indexReporter = index.NewReporter() + err = suite.indexReporter.Initialize(suite.reporter) + suite.Require().NoError(err) + suite.eventsIndex = index.NewEventsIndex(suite.indexReporter, suite.events) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.lightTxResults) + + suite.systemTx, err = blueprints.SystemChunkTransaction(flow.Testnet.Chain()) + suite.Require().NoError(err) + + suite.db, suite.dbDir = unittest.TempBadgerDB(suite.T()) + progress, err := store.NewConsumerProgress(badgerimpl.ToDB(suite.db), module.ConsumeProgressLastFullBlockHeight).Initialize(0) + require.NoError(suite.T(), err) + suite.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(progress) + suite.Require().NoError(err) + + suite.fixedExecutionNodeIDs = nil + suite.preferredExecutionNodeIDs = nil + suite.errorMessageProvider = nil +} + +func (suite *Suite) TearDownTest() { + err := 
os.RemoveAll(suite.dbDir) + suite.Require().NoError(err) +} + +func (suite *Suite) defaultTransactionsParams() Params { + nodeProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + suite.preferredExecutionNodeIDs, + suite.fixedExecutionNodeIDs, + ) + + txStatusDeriver := txstatus.NewTxStatusDeriver( + suite.state, + suite.lastFullBlockHeight, + ) + + txValidator, err := validator.NewTransactionValidator( + validatormock.NewBlocks(suite.T()), + suite.chainID.Chain(), + metrics.NewNoopCollector(), + validator.TransactionValidationOptions{}, + execmock.NewScriptExecutor(suite.T()), + ) + suite.Require().NoError(err) + + nodeCommunicator := node_communicator.NewNodeCommunicator(false) + + txProvider := provider.NewENTransactionProvider( + suite.log, + suite.state, + suite.collections, + suite.connectionFactory, + nodeCommunicator, + nodeProvider, + txStatusDeriver, + suite.systemTx.ID(), + suite.systemTx, + ) + + return Params{ + Log: suite.log, + Metrics: metrics.NewNoopCollector(), + State: suite.state, + ChainID: flow.Testnet, + SystemTxID: suite.systemTx.ID(), + SystemTx: suite.systemTx, + StaticCollectionRPCClient: suite.historicalAccessAPIClient, + HistoricalAccessNodeClients: nil, + NodeCommunicator: nodeCommunicator, + ConnFactory: suite.connectionFactory, + EnableRetries: true, + NodeProvider: nodeProvider, + Blocks: suite.blocks, + Collections: suite.collections, + Transactions: suite.transactions, + TxErrorMessageProvider: suite.errorMessageProvider, + TxResultCache: suite.txResultCache, + TxProvider: txProvider, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: suite.eventsIndex, + TxResultsIndex: suite.txResultsIndex, + } +} + +// TestGetTransactionResult_UnknownTx returns unknown result when tx not found +func (suite *Suite) TestGetTransactionResult_UnknownTx() { + block := unittest.BlockFixture() + tbody := unittest.TransactionBodyFixture() + tx := unittest.TransactionFixture() + tx.TransactionBody = tbody + coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) + + suite.transactions. + On("ByID", tx.ID()). + Return(nil, storage.ErrNotFound) + + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + res, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(res.Status, flow.TransactionStatusUnknown) + suite.Require().Empty(res.BlockID) + suite.Require().Empty(res.BlockHeight) + suite.Require().Empty(res.TransactionID) + suite.Require().Empty(res.CollectionID) + suite.Require().Empty(res.ErrorMessage) +} + +// TestGetTransactionResult_TxLookupFailure returns error from transaction storage +func (suite *Suite) TestGetTransactionResult_TxLookupFailure() { + block := unittest.BlockFixture() + tbody := unittest.TransactionBodyFixture() + tx := unittest.TransactionFixture() + tx.TransactionBody = tbody + coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) + + expectedErr := fmt.Errorf("some other error") + suite.transactions. + On("ByID", tx.ID()). 
+ Return(nil, expectedErr) + + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + + _, err = txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to find: %v", expectedErr)) +} + +// TestGetTransactionResult_HistoricNodes_Success tests lookup in historic nodes +func (suite *Suite) TestGetTransactionResult_HistoricNodes_Success() { + block := unittest.BlockFixture() + tbody := unittest.TransactionBodyFixture() + tx := unittest.TransactionFixture() + tx.TransactionBody = tbody + coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) + + suite.transactions. + On("ByID", tx.ID()). + Return(nil, storage.ErrNotFound) + + transactionResultResponse := access.TransactionResultResponse{ + Status: entities.TransactionStatus_EXECUTED, + StatusCode: uint32(entities.TransactionStatus_EXECUTED), + } + + suite.historicalAccessAPIClient. + On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool { + txID := tx.ID() + return bytes.Equal(txID[:], req.Id) + })). + Return(&transactionResultResponse, nil). + Once() + + params := suite.defaultTransactionsParams() + params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient} + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + + resp, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status) + suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode) +} + +// TestGetTransactionResult_HistoricNodes_FromCache get historic transaction result from cache +func (suite *Suite) TestGetTransactionResult_HistoricNodes_FromCache() { + block := unittest.BlockFixture() + tbody := unittest.TransactionBodyFixture() + tx := unittest.TransactionFixture() + tx.TransactionBody = tbody + + suite.transactions. + On("ByID", tx.ID()). + Return(nil, storage.ErrNotFound) + + transactionResultResponse := access.TransactionResultResponse{ + Status: entities.TransactionStatus_EXECUTED, + StatusCode: uint32(entities.TransactionStatus_EXECUTED), + } + + suite.historicalAccessAPIClient. + On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool { + txID := tx.ID() + return bytes.Equal(txID[:], req.Id) + })). + Return(&transactionResultResponse, nil). 
+ Once() + + params := suite.defaultTransactionsParams() + params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient} + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + + coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) + resp, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status) + suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode) + + resp2, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(flow.TransactionStatusExecuted, resp2.Status) + suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp2.StatusCode) +} + +// TestGetTransactionResultUnknownFromCache retrieve unknown result from cache. +func (suite *Suite) TestGetTransactionResultUnknownFromCache() { + block := unittest.BlockFixture() + tbody := unittest.TransactionBodyFixture() + tx := unittest.TransactionFixture() + tx.TransactionBody = tbody + + suite.transactions. + On("ByID", tx.ID()). + Return(nil, storage.ErrNotFound) + + suite.historicalAccessAPIClient. + On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool { + txID := tx.ID() + return bytes.Equal(txID[:], req.Id) + })). + Return(nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", tx.ID())). + Once() + + params := suite.defaultTransactionsParams() + params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient} + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + + coll := flow.CollectionFromTransactions([]*flow.Transaction{&tx}) + resp, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(flow.TransactionStatusUnknown, resp.Status) + suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp.StatusCode) + + // ensure the unknown transaction is cached when not found anywhere + txStatus := flow.TransactionStatusUnknown + res, ok := txBackend.txResultCache.Get(tx.ID()) + suite.Require().True(ok) + suite.Require().Equal(res, &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: uint(txStatus), + }) + + // ensure underlying GetTransactionResult() won't be called the second time + resp2, err := txBackend.GetTransactionResult( + context.Background(), + tx.ID(), + block.ID(), + coll.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(flow.TransactionStatusUnknown, resp2.Status) + suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp2.StatusCode) +} + +// TestGetSystemTransaction_HappyPath tests that GetSystemTransaction call returns system chunk transaction. 
+func (suite *Suite) TestGetSystemTransaction_HappyPath() { + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + require.NoError(suite.T(), err) + + block := unittest.BlockFixture() + res, err := txBackend.GetSystemTransaction(context.Background(), block.ID()) + suite.Require().NoError(err) + + systemTx, err := blueprints.SystemChunkTransaction(suite.chainID.Chain()) + suite.Require().NoError(err) + + suite.Require().Equal(systemTx, res) +} + +func (suite *Suite) TestGetSystemTransactionResult_HappyPath() { + test := func(snapshot protocol.Snapshot) { + suite.state. + On("Sealed"). + Return(snapshot, nil). + Once() + + lastBlock, err := snapshot.Head() + suite.Require().NoError(err) + + identities, err := snapshot.Identities(filter.Any) + suite.Require().NoError(err) + + block := unittest.BlockWithParentFixture(lastBlock) + blockID := block.ID() + suite.state. + On("AtBlockID", blockID). + Return(unittest.StateSnapshotForKnownBlock(block.Header, identities.Lookup()), nil). + Once() + + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockID). + Return(block, nil). + Once() + + receipt1 := unittest.ReceiptForBlockFixture(block) + suite.receipts. + On("ByBlockID", block.ID()). + Return(flow.ExecutionReceiptList{receipt1}, nil) + + // Generating events with event generator + exeNodeEventEncodingVersion := entities.EventEncodingVersion_CCF_V0 + events := generator.GetEventsWithEncoding(1, exeNodeEventEncodingVersion) + eventMessages := convert.EventsToMessages(events) + + exeEventResp := &execproto.GetTransactionResultsResponse{ + TransactionResults: []*execproto.GetTransactionResultResponse{{ + Events: eventMessages, + EventEncodingVersion: exeNodeEventEncodingVersion, + }}, + EventEncodingVersion: exeNodeEventEncodingVersion, + } + + suite.executionAPIClient. + On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *execproto.GetTransactionResultRequest) bool { + txID := suite.systemTx.ID() + return bytes.Equal(txID[:], req.TransactionId) + })). + Return(exeEventResp.TransactionResults[0], nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). 
+ Once() + + // the connection factory should be used to get the execution node client + params := suite.defaultTransactionsParams() + backend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := backend.GetSystemTransactionResult( + context.Background(), + block.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + + // Expected system chunk transaction + suite.Require().Equal(flow.TransactionStatusExecuted, res.Status) + suite.Require().Equal(suite.systemTx.ID(), res.TransactionID) + + // Check for successful decoding of event + _, err = jsoncdc.Decode(nil, res.Events[0].Payload) + suite.Require().NoError(err) + + events, err = convert.MessagesToEventsWithEncodingConversion( + eventMessages, + exeNodeEventEncodingVersion, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(events, res.Events) + } + + identities := unittest.CompleteIdentitySet() + rootSnapshot := unittest.RootSnapshotFixture(identities) + util.RunWithFullProtocolStateAndMutator( + suite.T(), + rootSnapshot, + func(db *badger.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { + epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) + + epochBuilder. + BuildEpoch(). + CompleteEpoch() + + // get heights of each phase in built epochs + epoch1, ok := epochBuilder.EpochHeights(1) + require.True(suite.T(), ok) + + snapshot := state.AtHeight(epoch1.FinalHeight()) + suite.state.On("Final").Return(snapshot) + test(snapshot) + }, + ) +} + +func (suite *Suite) TestGetSystemTransactionResultFromStorage() { + block := unittest.BlockFixture() + sysTx, err := blueprints.SystemChunkTransaction(suite.chainID.Chain()) + suite.Require().NoError(err) + suite.Require().NotNil(sysTx) + txId := suite.systemTx.ID() + blockId := block.ID() + + suite.blocks. + On("ByID", blockId). + Return(&block, nil). + Once() + + lightTxShouldFail := false + suite.lightTxResults. + On("ByBlockIDTransactionID", blockId, txId). + Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: lightTxShouldFail, + ComputationUsed: 0, + }, nil). 
+		Once()
+
+	// Set up the events storage mock
+	var eventsForTx []flow.Event
+	// expect a call to lookup events by block ID and transaction ID
+	suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil)
+
+	// Set up the state and snapshot mocks
+	suite.state.On("Sealed").Return(suite.snapshot, nil)
+	suite.snapshot.On("Head").Return(block.Header, nil)
+
+	// create a mock index reporter
+	reporter := syncmock.NewIndexReporter(suite.T())
+	reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil)
+	reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil)
+
+	indexReporter := index.NewReporter()
+	err = indexReporter.Initialize(reporter)
+	suite.Require().NoError(err)
+
+	// Set up the backend parameters and the backend instance
+	params := suite.defaultTransactionsParams()
+	params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events)
+	params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.lightTxResults)
+	params.TxProvider = provider.NewLocalTransactionProvider(
+		params.State,
+		params.Collections,
+		params.Blocks,
+		params.EventsIndex,
+		params.TxResultsIndex,
+		params.TxErrorMessageProvider,
+		params.SystemTxID,
+		params.TxStatusDeriver,
+	)
+
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+	response, err := txBackend.GetSystemTransactionResult(context.Background(), blockId, entities.EventEncodingVersion_JSON_CDC_V0)
+	suite.assertTransactionResultResponse(err, response, block, txId, lightTxShouldFail, eventsForTx)
+}
+
+// TestGetSystemTransactionResult_BlockNotFound tests the GetSystemTransactionResult function when the block is not found.
+func (suite *Suite) TestGetSystemTransactionResult_BlockNotFound() {
+	block := unittest.BlockFixture()
+	suite.blocks.
+		On("ByID", block.ID()).
+		Return(nil, storage.ErrNotFound).
+		Once()
+
+	params := suite.defaultTransactionsParams()
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+	res, err := txBackend.GetSystemTransactionResult(
+		context.Background(),
+		block.ID(),
+		entities.EventEncodingVersion_JSON_CDC_V0,
+	)
+
+	suite.Require().Nil(res)
+	suite.Require().Error(err)
+	suite.Require().Equal(err, status.Errorf(codes.NotFound, "not found: %v", fmt.Errorf("key not found")))
+}
+
+// TestGetSystemTransactionResult_FailedEncodingConversion tests that the GetSystemTransactionResult function returns an
+// error when the requested event encoding conversion is not supported.
+func (suite *Suite) TestGetSystemTransactionResult_FailedEncodingConversion() {
+	block := unittest.BlockFixture()
+	blockID := block.ID()
+
+	_, fixedENIDs := suite.setupReceipts(&block)
+	suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs()
+
+	suite.snapshot.On("Head").Return(block.Header, nil)
+	suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil)
+	suite.state.On("Sealed").Return(suite.snapshot, nil)
+	suite.state.On("Final").Return(suite.snapshot, nil)
+
+	// block storage returns the corresponding block
+	suite.blocks.
+		On("ByID", blockID).
+		Return(&block, nil).
+		Once()
+
+	// create empty events
+	eventsPerBlock := 10
+	eventMessages := make([]*entities.Event, eventsPerBlock)
+
+	exeEventResp := &execproto.GetTransactionResultsResponse{
+		TransactionResults: []*execproto.GetTransactionResultResponse{{
+			Events:               eventMessages,
+			EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0,
+		}},
+	}
+
+	suite.executionAPIClient.
+ On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *execproto.GetTransactionResultRequest) bool { + txID := suite.systemTx.ID() + return bytes.Equal(txID[:], req.TransactionId) + })). + Return(exeEventResp.TransactionResults[0], nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransactionResult( + context.Background(), + block.ID(), + entities.EventEncodingVersion_CCF_V0, + ) + + suite.Require().Nil(res) + suite.Require().Error(err) + suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to convert events to message: %v", + fmt.Errorf("conversion from format JSON_CDC_V0 to CCF_V0 is not supported"))) +} + +// TestGetTransactionResult_FromStorage tests the retrieval of a transaction result (flow.TransactionResult) from storage +// instead of requesting it from the Execution Node. +func (suite *Suite) TestGetTransactionResult_FromStorage() { + // Create fixtures for block, transaction, and collection + block := unittest.BlockFixture() + transaction := unittest.TransactionFixture() + col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) + guarantee := col.Guarantee() + block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) + txId := transaction.ID() + blockId := block.ID() + + suite.blocks. + On("ByID", blockId). + Return(&block, nil) + + suite.lightTxResults.On("ByBlockIDTransactionID", blockId, txId). + Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: true, + ComputationUsed: 0, + }, nil) + + suite.transactions. + On("ByID", txId). + Return(&transaction.TransactionBody, nil) + + // Set up the light collection and mock the behavior of the collections object + lightCol := col.Light() + suite.collections.On("LightByID", col.ID()).Return(&lightCol, nil) + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + // expect a call to lookup events by block ID and transaction ID + suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(&block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.Header, nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + // Set up the expected error message for the execution node response + exeEventReq := &execproto.GetTransactionErrorMessageRequest{ + BlockId: blockId[:], + TransactionId: txId[:], + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: txId[:], + ErrorMessage: expectedErrorMsg, + } + suite.executionAPIClient. 
+ On("GetTransactionErrorMessage", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + ) + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + response, err := txBackend.GetTransactionResult(context.Background(), txId, blockId, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0) + suite.assertTransactionResultResponse(err, response, block, txId, true, eventsForTx) + + suite.reporter.AssertExpectations(suite.T()) + suite.connectionFactory.AssertExpectations(suite.T()) + suite.executionAPIClient.AssertExpectations(suite.T()) + suite.blocks.AssertExpectations(suite.T()) + suite.events.AssertExpectations(suite.T()) + suite.state.AssertExpectations(suite.T()) +} + +// TestTransactionByIndexFromStorage tests the retrieval of a transaction result (flow.TransactionResult) by index +// and returns it from storage instead of requesting from the Execution Node. +func (suite *Suite) TestTransactionByIndexFromStorage() { + // Create fixtures for block, transaction, and collection + block := unittest.BlockFixture() + transaction := unittest.TransactionFixture() + col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) + guarantee := col.Guarantee() + block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) + blockId := block.ID() + txId := transaction.ID() + txIndex := rand.Uint32() + + // Set up the light collection and mock the behavior of the collections object + lightCol := col.Light() + suite.collections.On("LightByID", col.ID()).Return(&lightCol, nil) + + // Mock the behavior of the blocks and lightTxResults objects + suite.blocks. + On("ByID", blockId). + Return(&block, nil) + + suite.lightTxResults.On("ByBlockIDTransactionIndex", blockId, txIndex). + Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: true, + ComputationUsed: 0, + }, nil) + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + + // expect a call to lookup events by block ID and transaction ID + suite.events.On("ByBlockIDTransactionIndex", blockId, txIndex).Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(&block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.Header, nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). 
+ Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + ) + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + // Set up the expected error message for the execution node response + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: blockId[:], + Index: txIndex, + } + + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: txId[:], + ErrorMessage: expectedErrorMsg, + } + + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + response, err := txBackend.GetTransactionResultByIndex(context.Background(), blockId, txIndex, entities.EventEncodingVersion_JSON_CDC_V0) + suite.assertTransactionResultResponse(err, response, block, txId, true, eventsForTx) +} + +// TestTransactionResultsByBlockIDFromStorage tests the retrieval of transaction results ([]flow.TransactionResult) +// by block ID from storage instead of requesting from the Execution Node. +func (suite *Suite) TestTransactionResultsByBlockIDFromStorage() { + // Create fixtures for the block and collection + block := unittest.BlockFixture() + col := unittest.CollectionFixture(2) + guarantee := col.Guarantee() + block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(&guarantee))) + blockId := block.ID() + + // Mock the behavior of the blocks, collections and light transaction results objects + suite.blocks. + On("ByID", blockId). + Return(&block, nil) + lightCol := col.Light() + suite.collections. + On("LightByID", mock.Anything). + Return(&lightCol, nil). + Once() + + lightTxResults := make([]flow.LightTransactionResult, len(lightCol.Transactions)) + for i, txID := range lightCol.Transactions { + lightTxResults[i] = flow.LightTransactionResult{ + TransactionID: txID, + Failed: false, + ComputationUsed: 0, + } + } + // simulate the system tx + lightTxResults = append(lightTxResults, flow.LightTransactionResult{ + TransactionID: suite.systemTx.ID(), + Failed: false, + ComputationUsed: 10, + }) + + // Mark the first transaction as failed + lightTxResults[0].Failed = true + suite.lightTxResults. + On("ByBlockID", blockId). + Return(lightTxResults, nil). + Once() + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents, flow.EventAccountCreated) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + + // expect a call to lookup events by block ID and transaction ID + suite.events. + On("ByBlockIDTransactionID", blockId, mock.Anything). 
+ Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(&block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.Header, nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Header.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Header.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + ) + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + // Set up the expected error message for the execution node response + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + res := &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: lightTxResults[0].TransactionID[:], + ErrorMessage: expectedErrorMsg, + Index: 1, + } + exeEventResp := &execproto.GetTransactionErrorMessagesResponse{ + Results: []*execproto.GetTransactionErrorMessagesResponse_Result{ + res, + }, + } + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + response, err := txBackend.GetTransactionResultsByBlockID(context.Background(), blockId, entities.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Assert().Equal(len(lightTxResults), len(response)) + + // Assertions for each transaction result in the response + for i, responseResult := range response { + lightTx := lightTxResults[i] + suite.assertTransactionResultResponse(err, responseResult, block, lightTx.TransactionID, lightTx.Failed, eventsForTx) + } +} + +// TestTransactionRetry tests that the retry mechanism will send retries at specific times +func (suite *Suite) TestTransactionRetry() { + collection := unittest.CollectionFixture(1) + transactionBody := collection.Transactions[0] + block := unittest.BlockFixture() + // Height needs to be at least DefaultTransactionExpiry before we start doing retries + block.Header.Height = flow.DefaultTransactionExpiry + 1 + transactionBody.SetReferenceBlockID(block.ID()) + headBlock := unittest.BlockFixture() + headBlock.Header.Height = block.Header.Height - 1 // head is behind the current block + suite.state.On("Final").Return(suite.snapshot, nil) + + suite.snapshot.On("Head").Return(headBlock.Header, nil) + snapshotAtBlock := protocolmock.NewSnapshot(suite.T()) + snapshotAtBlock.On("Head").Return(block.Header, nil) + suite.state.On("AtBlockID", block.ID()).Return(snapshotAtBlock, nil) + + // collection storage returns a not found error + suite.collections. + On("LightByTransactionID", transactionBody.ID()). 
+		Return(nil, storage.ErrNotFound)
+
+	client := accessmock.NewAccessAPIClient(suite.T())
+	params := suite.defaultTransactionsParams()
+	params.StaticCollectionRPCClient = client
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+
+	retry := retrier.NewRetrier(
+		suite.log,
+		suite.blocks,
+		suite.collections,
+		txBackend,
+		txBackend.txStatusDeriver,
+	)
+	retry.RegisterTransaction(block.Header.Height, transactionBody)
+
+	client.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil)
+
+	// Don't retry on every height
+	err = retry.Retry(block.Header.Height + 1)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Retry every `retryFrequency`
+	err = retry.Retry(block.Header.Height + retrier.RetryFrequency)
+	suite.Require().NoError(err)
+
+	client.AssertNumberOfCalls(suite.T(), "SendTransaction", 1)
+
+	// do not retry if expired
+	err = retry.Retry(block.Header.Height + retrier.RetryFrequency + flow.DefaultTransactionExpiry)
+	suite.Require().NoError(err)
+
+	// Should've still only been called once
+	client.AssertNumberOfCalls(suite.T(), "SendTransaction", 1)
+}
+
+// TestSuccessfulTransactionsDontRetry tests that the retry mechanism does not resend transactions that have already been finalized
+func (suite *Suite) TestSuccessfulTransactionsDontRetry() {
+	collection := unittest.CollectionFixture(1)
+	light := collection.Light()
+	transactionBody := collection.Transactions[0]
+	txID := transactionBody.ID()
+
+	block := unittest.BlockFixture()
+	blockID := block.ID()
+
+	// setup chain state
+	_, fixedENIDs := suite.setupReceipts(&block)
+	suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs()
+
+	suite.state.On("Final").Return(suite.snapshot, nil)
+	suite.transactions.On("ByID", transactionBody.ID()).Return(transactionBody, nil)
+	suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(&light, nil)
+	suite.blocks.On("ByCollectionID", collection.ID()).Return(&block, nil)
+	suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil)
+
+	exeEventReq := execproto.GetTransactionResultRequest{
+		BlockId:       blockID[:],
+		TransactionId: txID[:],
+	}
+	exeEventResp := execproto.GetTransactionResultResponse{
+		Events: nil,
+	}
+	suite.executionAPIClient.
+		On("GetTransactionResult", context.Background(), &exeEventReq).
+		Return(&exeEventResp, status.Errorf(codes.NotFound, "not found")).
+		Times(len(fixedENIDs)) // should call each EN once
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Times(len(fixedENIDs))
+
+	params := suite.defaultTransactionsParams()
+	client := accessmock.NewAccessAPIClient(suite.T())
+	params.StaticCollectionRPCClient = client
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+
+	retry := retrier.NewRetrier(
+		suite.log,
+		suite.blocks,
+		suite.collections,
+		txBackend,
+		txBackend.txStatusDeriver,
+	)
+	retry.RegisterTransaction(block.Header.Height, transactionBody)
+
+	// first call - the block under test has a greater height than the sealed head, but the execution node does not know about the Tx
+	result, err := txBackend.GetTransactionResult(
+		context.Background(),
+		txID,
+		flow.ZeroID,
+		flow.ZeroID,
+		entities.EventEncodingVersion_JSON_CDC_V0,
+	)
+	suite.Require().NoError(err)
+	suite.Require().NotNil(result)
+
+	// status should be finalized since the sealed block is lower in height
+	suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Header.Height + 1)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Header.Height + retrier.RetryFrequency)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Header.Height + retrier.RetryFrequency + flow.DefaultTransactionExpiry)
+	suite.Require().NoError(err)
+
+	// SendTransaction should still not have been called
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+}
+
+func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) {
+	ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution))
+	receipt1 := unittest.ReceiptForBlockFixture(block)
+	receipt1.ExecutorID = ids[0].NodeID
+	receipt2 := unittest.ReceiptForBlockFixture(block)
+	receipt2.ExecutorID = ids[1].NodeID
+	receipt1.ExecutionResult = receipt2.ExecutionResult
+
+	receipts := flow.ExecutionReceiptList{receipt1, receipt2}
+	suite.receipts.
+		On("ByBlockID", block.ID()).
+		Return(receipts, nil)
+
+	return receipts, ids
+}
+
+func (suite *Suite) assertTransactionResultResponse(
+	err error,
+	response *accessmodel.TransactionResult,
+	block flow.Block,
+	txId flow.Identifier,
+	txFailed bool,
+	eventsForTx []flow.Event,
+) {
+	suite.Require().NoError(err)
+	suite.Assert().Equal(block.ID(), response.BlockID)
+	suite.Assert().Equal(block.Header.Height, response.BlockHeight)
+	suite.Assert().Equal(txId, response.TransactionID)
+	if txId == suite.systemTx.ID() {
+		suite.Assert().Equal(flow.ZeroID, response.CollectionID)
+	} else {
+		suite.Assert().Equal(block.Payload.Guarantees[0].CollectionID, response.CollectionID)
+	}
+	suite.Assert().Equal(len(eventsForTx), len(response.Events))
+	// When the transaction failed, the status code should be 1 and the error message should be set
+	if txFailed {
+		suite.Assert().Equal(uint(1), response.StatusCode)
+		suite.Assert().Equal(expectedErrorMsg, response.ErrorMessage)
+	} else {
+		suite.Assert().Equal(uint(0), response.StatusCode)
+		suite.Assert().Equal("", response.ErrorMessage)
+	}
+	suite.Assert().Equal(flow.TransactionStatusSealed, response.Status)
+}
diff --git a/engine/access/rpc/rate_limit_test.go b/engine/access/rpc/rate_limit_test.go
index cfbe259ff39..04f7fcff07d 100644
--- a/engine/access/rpc/rate_limit_test.go
+++ b/engine/access/rpc/rate_limit_test.go
@@ -23,6 +23,8 @@ import (
 	accessmock "github.com/onflow/flow-go/engine/access/mock"
 	"github.com/onflow/flow-go/engine/access/rest/websockets"
 	"github.com/onflow/flow-go/engine/access/rpc/backend"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode"
 	statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/grpcserver"
@@ -169,7 +171,10 @@ func (suite *RateLimitTestSuite) SetupTest() {
 		MaxHeightRange:            0,
 		Log:                       suite.log,
 		SnapshotHistoryLimit:      0,
-		Communicator:              backend.NewNodeCommunicator(false),
+		Communicator:              node_communicator.NewNodeCommunicator(false),
+		EventQueryMode:            query_mode.IndexQueryModeExecutionNodesOnly,
+		ScriptExecutionMode:       query_mode.IndexQueryModeExecutionNodesOnly,
+		TxResultQueryMode:         query_mode.IndexQueryModeExecutionNodesOnly,
 	})
 	suite.Require().NoError(err)
 
diff --git a/engine/access/secure_grpcr_test.go b/engine/access/secure_grpcr_test.go
index 82cd74b3cf8..1ff89ffbd9b 100644
--- a/engine/access/secure_grpcr_test.go
+++ b/engine/access/secure_grpcr_test.go
@@ -22,6 +22,8 @@ import (
 	"github.com/onflow/flow-go/engine/access/rest/websockets"
 	"github.com/onflow/flow-go/engine/access/rpc"
 	"github.com/onflow/flow-go/engine/access/rpc/backend"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode"
 	statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/grpcserver"
@@ -153,7 +155,10 @@ func (suite *SecureGRPCTestSuite) SetupTest() {
 		MaxHeightRange:            0,
 		Log:                       suite.log,
 		SnapshotHistoryLimit:      0,
-		Communicator:              backend.NewNodeCommunicator(false),
+		Communicator:              node_communicator.NewNodeCommunicator(false),
+		EventQueryMode:            query_mode.IndexQueryModeExecutionNodesOnly,
+		ScriptExecutionMode:       query_mode.IndexQueryModeExecutionNodesOnly,
+		TxResultQueryMode:         query_mode.IndexQueryModeExecutionNodesOnly,
 	})
 	suite.Require().NoError(err)
 
diff --git a/engine/access/state_stream/backend/backend.go
b/engine/access/state_stream/backend/backend.go index 3f0decc0e07..9d4fbdfc000 100644 --- a/engine/access/state_stream/backend/backend.go +++ b/engine/access/state_stream/backend/backend.go @@ -118,7 +118,7 @@ func New( executionDataTracker: executionDataTracker, } - eventsRetriever := EventsRetriever{ + eventsProvider := EventsProvider{ log: logger, headers: headers, getExecutionData: b.getExecutionData, @@ -130,14 +130,14 @@ func New( log: logger, subscriptionHandler: subscriptionHandler, executionDataTracker: executionDataTracker, - eventsRetriever: eventsRetriever, + eventsProvider: eventsProvider, } b.AccountStatusesBackend = AccountStatusesBackend{ log: logger, subscriptionHandler: subscriptionHandler, executionDataTracker: b.ExecutionDataTracker, - eventsRetriever: eventsRetriever, + eventsProvider: eventsProvider, } return b, nil diff --git a/engine/access/state_stream/backend/backend_account_statuses.go b/engine/access/state_stream/backend/backend_account_statuses.go index 1a8decb5ca0..d168e7ddd96 100644 --- a/engine/access/state_stream/backend/backend_account_statuses.go +++ b/engine/access/state_stream/backend/backend_account_statuses.go @@ -27,7 +27,7 @@ type AccountStatusesBackend struct { subscriptionHandler *subscription.SubscriptionHandler executionDataTracker tracker.ExecutionDataTracker - eventsRetriever EventsRetriever + eventsProvider EventsProvider } // subscribe creates and returns a subscription to receive account status updates starting from the specified height. @@ -97,7 +97,7 @@ func (b *AccountStatusesBackend) getAccountStatusResponseFactory( filter state_stream.AccountStatusFilter, ) subscription.GetDataByHeightFunc { return func(ctx context.Context, height uint64) (interface{}, error) { - eventsResponse, err := b.eventsRetriever.GetAllEventsResponse(ctx, height) + eventsResponse, err := b.eventsProvider.GetAllEventsResponse(ctx, height) if err != nil { if errors.Is(err, storage.ErrNotFound) || errors.Is(err, storage.ErrHeightNotIndexed) { diff --git a/engine/access/state_stream/backend/backend_events.go b/engine/access/state_stream/backend/backend_events.go index b5592c2c89e..e4c94ffc5dd 100644 --- a/engine/access/state_stream/backend/backend_events.go +++ b/engine/access/state_stream/backend/backend_events.go @@ -19,7 +19,7 @@ type EventsBackend struct { subscriptionHandler *subscription.SubscriptionHandler executionDataTracker tracker.ExecutionDataTracker - eventsRetriever EventsRetriever + eventsProvider EventsProvider } // SubscribeEvents is deprecated and will be removed in a future version. @@ -133,7 +133,7 @@ func (b *EventsBackend) SubscribeEventsFromLatest(ctx context.Context, filter st // - subscription.ErrBlockNotReady: execution data for the given block height is not available. 
func (b *EventsBackend) getResponseFactory(filter state_stream.EventFilter) subscription.GetDataByHeightFunc { return func(ctx context.Context, height uint64) (response interface{}, err error) { - eventsResponse, err := b.eventsRetriever.GetAllEventsResponse(ctx, height) + eventsResponse, err := b.eventsProvider.GetAllEventsResponse(ctx, height) if err != nil { if errors.Is(err, storage.ErrNotFound) || errors.Is(err, storage.ErrHeightNotIndexed) { diff --git a/engine/access/state_stream/backend/event_retriever.go b/engine/access/state_stream/backend/event_retriever.go index eb1ef29c015..3013836666f 100644 --- a/engine/access/state_stream/backend/event_retriever.go +++ b/engine/access/state_stream/backend/event_retriever.go @@ -21,9 +21,9 @@ type EventsResponse struct { BlockTimestamp time.Time } -// EventsRetriever retrieves events by block height. It can be configured to retrieve events from +// EventsProvider retrieves events by block height. It can be configured to retrieve events from // the events indexer(if available) or using a dedicated callback to query it from other sources. -type EventsRetriever struct { +type EventsProvider struct { log zerolog.Logger headers storage.Headers getExecutionData GetExecutionDataFunc @@ -35,7 +35,7 @@ type EventsRetriever struct { // Expected errors: // - codes.NotFound: If block header for the specified block height is not found. // - error: An error, if any, encountered during getting events from storage or execution data. -func (b *EventsRetriever) GetAllEventsResponse(ctx context.Context, height uint64) (*EventsResponse, error) { +func (b *EventsProvider) GetAllEventsResponse(ctx context.Context, height uint64) (*EventsResponse, error) { var response *EventsResponse var err error if b.useEventsIndex { @@ -66,7 +66,7 @@ func (b *EventsRetriever) GetAllEventsResponse(ctx context.Context, height uint6 // getEventsFromExecutionData returns the events for a given height extract from the execution data. // Expected errors: // - error: An error indicating issues with getting execution data for block -func (b *EventsRetriever) getEventsFromExecutionData(ctx context.Context, height uint64) (*EventsResponse, error) { +func (b *EventsProvider) getEventsFromExecutionData(ctx context.Context, height uint64) (*EventsResponse, error) { executionData, err := b.getExecutionData(ctx, height) if err != nil { return nil, fmt.Errorf("could not get execution data for block %d: %w", height, err) @@ -88,7 +88,7 @@ func (b *EventsRetriever) getEventsFromExecutionData(ctx context.Context, height // Expected errors: // - error: An error indicating any issues with the provided block height or // an error indicating issue with getting events for a block. 
-func (b *EventsRetriever) getEventsFromStorage(height uint64) (*EventsResponse, error) { +func (b *EventsProvider) getEventsFromStorage(height uint64) (*EventsResponse, error) { blockID, err := b.headers.BlockIDByHeight(height) if err != nil { return nil, fmt.Errorf("could not get header for height %d: %w", height, err) diff --git a/engine/collection/epochmgr/factories/builder.go b/engine/collection/epochmgr/factories/builder.go index a588e52a080..305221683b9 100644 --- a/engine/collection/epochmgr/factories/builder.go +++ b/engine/collection/epochmgr/factories/builder.go @@ -3,7 +3,7 @@ package factories import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine/collection" @@ -17,8 +17,9 @@ import ( ) type BuilderFactory struct { - db *badger.DB + db storage.DB protoState protocol.State + lockManager lockctx.Manager mainChainHeaders storage.Headers trace module.Tracer opts []builder.Opt @@ -28,8 +29,9 @@ type BuilderFactory struct { } func NewBuilderFactory( - db *badger.DB, + db storage.DB, protoState protocol.State, + lockManager lockctx.Manager, mainChainHeaders storage.Headers, trace module.Tracer, metrics module.CollectionMetrics, @@ -41,6 +43,7 @@ func NewBuilderFactory( factory := &BuilderFactory{ db: db, protoState: protoState, + lockManager: lockManager, mainChainHeaders: mainChainHeaders, trace: trace, metrics: metrics, @@ -62,6 +65,7 @@ func (f *BuilderFactory) Create( build, err := builder.NewBuilder( f.db, f.trace, + f.lockManager, f.protoState, clusterState, f.mainChainHeaders, @@ -78,6 +82,7 @@ func (f *BuilderFactory) Create( final := finalizer.NewFinalizer( f.db, + f.lockManager, pool, f.pusher, f.metrics, diff --git a/engine/collection/epochmgr/factories/cluster_state.go b/engine/collection/epochmgr/factories/cluster_state.go index 7f786f4ff36..9548f033943 100644 --- a/engine/collection/epochmgr/factories/cluster_state.go +++ b/engine/collection/epochmgr/factories/cluster_state.go @@ -3,43 +3,47 @@ package factories import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/module" clusterkv "github.com/onflow/flow-go/state/cluster/badger" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type ClusterStateFactory struct { - db *badger.DB - metrics module.CacheMetrics - tracer module.Tracer + db storage.DB + lockManager lockctx.Manager + metrics module.CacheMetrics + tracer module.Tracer } func NewClusterStateFactory( - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, metrics module.CacheMetrics, tracer module.Tracer, ) (*ClusterStateFactory, error) { factory := &ClusterStateFactory{ - db: db, - metrics: metrics, - tracer: tracer, + db: db, + lockManager: lockManager, + metrics: metrics, + tracer: tracer, } return factory, nil } func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot) ( *clusterkv.MutableState, - *bstorage.Headers, - *bstorage.ClusterPayloads, - *bstorage.ClusterBlocks, + *store.Headers, + storage.ClusterPayloads, + storage.ClusterBlocks, error, ) { - headers := bstorage.NewHeaders(f.metrics, f.db) - payloads := bstorage.NewClusterPayloads(f.metrics, f.db) - blocks := bstorage.NewClusterBlocks(f.db, stateRoot.ClusterID(), headers, payloads) + headers := store.NewHeaders(f.metrics, f.db) + payloads := store.NewClusterPayloads(f.metrics, f.db) + blocks := store.NewClusterBlocks(f.db, stateRoot.ClusterID(), 
headers, payloads) isBootStrapped, err := clusterkv.IsBootstrapped(f.db, stateRoot.ClusterID()) if err != nil { @@ -52,13 +56,13 @@ func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot) ( return nil, nil, nil, nil, fmt.Errorf("could not open cluster state: %w", err) } } else { - clusterState, err = clusterkv.Bootstrap(f.db, stateRoot) + clusterState, err = clusterkv.Bootstrap(f.db, f.lockManager, stateRoot) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could not bootstrap cluster state: %w", err) } } - mutableState, err := clusterkv.NewMutableState(clusterState, f.tracer, headers, payloads) + mutableState, err := clusterkv.NewMutableState(clusterState, f.lockManager, f.tracer, headers, payloads) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could create mutable cluster state: %w", err) } diff --git a/engine/collection/message_hub/message_hub.go b/engine/collection/message_hub/message_hub.go index 857f36c3d7d..b4f27de244c 100644 --- a/engine/collection/message_hub/message_hub.go +++ b/engine/collection/message_hub/message_hub.go @@ -354,23 +354,21 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { // OnOwnVote propagates the vote to relevant recipient(s): // - [common case] vote is queued and is sent via unicast to another node that is the next leader by worker // - [special case] this node is the next leader: vote is directly forwarded to the node's internal `VoteAggregator` -func (h *MessageHub) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - vote := &messages.ClusterBlockVote{ - BlockID: blockID, - View: view, - SigData: sigData, - } - +func (h *MessageHub) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { // special case: I am the next leader if recipientID == h.me.NodeID() { - h.forwardToOwnVoteAggregator(vote, h.me.NodeID()) // forward vote to my own `voteAggregator` + h.forwardToOwnVoteAggregator(vote) // forward vote to my own `voteAggregator` return } // common case: someone else is leader packed := &packedVote{ recipientID: recipientID, - vote: vote, + vote: &messages.ClusterBlockVote{ + BlockID: vote.BlockID, + View: vote.View, + SigData: vote.SigData, + }, } if ok := h.ownOutboundVotes.Push(packed); ok { h.ownOutboundMessageNotifier.Notify() @@ -428,15 +426,30 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, Message: msg, }) case *messages.ClusterBlockVote: - h.forwardToOwnVoteAggregator(msg, originID) + vote, err := model.NewVote(model.UntrustedVote{ + View: msg.View, + BlockID: msg.BlockID, + SignerID: originID, + SigData: msg.SigData, + }) + if err != nil { + h.log.Warn().Err(err).Msgf("failed to forward vote") + } + + h.forwardToOwnVoteAggregator(vote) case *messages.ClusterTimeoutObject: - t := &model.TimeoutObject{ - View: msg.View, - NewestQC: msg.NewestQC, - LastViewTC: msg.LastViewTC, - SignerID: originID, - SigData: msg.SigData, - TimeoutTick: msg.TimeoutTick, + t, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: msg.View, + NewestQC: msg.NewestQC, + LastViewTC: msg.LastViewTC, + SignerID: originID, + SigData: msg.SigData, + TimeoutTick: msg.TimeoutTick, + }, + ) + if err != nil { + return fmt.Errorf("could not construct timeout object: %w", err) } h.forwardToOwnTimeoutAggregator(t) default: @@ -452,21 +465,15 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, // forwardToOwnVoteAggregator converts vote to generic `model.Vote`, logs vote and forwards it to own 
`voteAggregator`. // Per API convention, timeoutAggregator` is non-blocking, hence, this call returns quickly. -func (h *MessageHub) forwardToOwnVoteAggregator(vote *messages.ClusterBlockVote, originID flow.Identifier) { +func (h *MessageHub) forwardToOwnVoteAggregator(vote *model.Vote) { h.engineMetrics.MessageReceived(metrics.EngineCollectionMessageHub, metrics.MessageBlockVote) - v := &model.Vote{ - View: vote.View, - BlockID: vote.BlockID, - SignerID: originID, - SigData: vote.SigData, - } h.log.Debug(). - Uint64("block_view", v.View). - Hex("block_id", v.BlockID[:]). - Hex("voter", v.SignerID[:]). - Str("vote_id", v.ID().String()). + Uint64("block_view", vote.View). + Hex("block_id", vote.BlockID[:]). + Hex("voter", vote.SignerID[:]). + Str("vote_id", vote.ID().String()). Msg("block vote received, forwarding block vote to hotstuff vote aggregator") - h.voteAggregator.AddVote(v) + h.voteAggregator.AddVote(vote) } // forwardToOwnTimeoutAggregator logs timeout and forwards it to own `timeoutAggregator`. diff --git a/engine/collection/message_hub/message_hub_test.go b/engine/collection/message_hub/message_hub_test.go index 222ecdc31d2..aacc8d37b60 100644 --- a/engine/collection/message_hub/message_hub_test.go +++ b/engine/collection/message_hub/message_hub_test.go @@ -307,7 +307,7 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { }).Return(nil) // submit vote - s.hub.OnOwnVote(vote.BlockID, vote.View, vote.SigData, recipientID) + s.hub.OnOwnVote(vote, recipientID) }) s.Run("timeout", func() { wg.Add(1) diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index dea18459cd1..87d1a7eb2a0 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -26,6 +26,7 @@ import ( "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -101,10 +102,13 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) seal.ResultID = result.ID() safetyParams, err := protocol.DefaultEpochSafetyParams(root.Header.ChainID) require.NoError(t, err) + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + require.NoError(t, err) rootProtocolState, err := kvstore.NewDefaultKVStore( safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, - inmem.EpochProtocolStateFromServiceEvents(setup, commit).ID()) + minEpochStateEntry.ID(), + ) require.NoError(t, err) root.Payload.ProtocolStateID = rootProtocolState.ID() tc.root, err = inmem.SnapshotFromBootstrapState(root, result, seal, qc) @@ -300,7 +304,7 @@ func (tc *ClusterSwitchoverTestCase) ExpectTransaction(epochCounter uint64, clus // ClusterState opens and returns a read-only cluster state for the given node and cluster ID. 
func (tc *ClusterSwitchoverTestCase) ClusterState(node testmock.CollectionNode, clusterID flow.ChainID, epoch uint64) cluster.State { - state, err := bcluster.OpenState(node.PublicDB, node.Tracer, node.Headers, node.ClusterPayloads, clusterID, epoch) + state, err := bcluster.OpenState(badgerimpl.ToDB(node.PublicDB), node.Tracer, node.Headers, node.ClusterPayloads, clusterID, epoch) require.NoError(tc.T(), err) return state } diff --git a/engine/common/follower/cache/cache.go b/engine/common/follower/cache/cache.go index 56576e235d9..d5f8e491e11 100644 --- a/engine/common/follower/cache/cache.go +++ b/engine/common/follower/cache/cache.go @@ -166,10 +166,10 @@ func (c *Cache) AddBlocks(batch []*flow.Block) (certifiedBatch []*flow.Block, ce // Otherwise, all blocks in the batch _except_ for the last one are certified if bc.batchChild != nil { certifiedBatch = batch - certifyingQC = bc.batchChild.Header.QuorumCertificate() + certifyingQC = bc.batchChild.Header.ParentQC() } else { certifiedBatch = batch[:batchSize-1] - certifyingQC = batch[batchSize-1].Header.QuorumCertificate() + certifyingQC = batch[batchSize-1].Header.ParentQC() } // caution: in the case `len(batch) == 1`, the `certifiedBatch` might be empty now (else-case) diff --git a/engine/common/follower/cache/cache_test.go b/engine/common/follower/cache/cache_test.go index b94a7cc31ce..8f7c1c44623 100644 --- a/engine/common/follower/cache/cache_test.go +++ b/engine/common/follower/cache/cache_test.go @@ -154,7 +154,7 @@ func (s *CacheSuite) TestBlockInTheMiddle() { certifiedBlocks, certifiedQC, err = s.cache.AddBlocks(blocks[1:2]) require.NoError(s.T(), err) require.Equal(s.T(), blocks[:2], certifiedBlocks) - require.Equal(s.T(), blocks[2].Header.QuorumCertificate(), certifiedQC) + require.Equal(s.T(), blocks[2].Header.ParentQC(), certifiedQC) } // TestAddBatch tests a scenario: B1 <- ... <- BN added in one batch. @@ -165,7 +165,7 @@ func (s *CacheSuite) TestAddBatch() { certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Equal(s.T(), blocks[len(blocks)-1].Header.ParentQC(), certifyingQC) } // TestDuplicatedBatch checks that processing redundant inputs rejects batches where all blocks @@ -176,7 +176,7 @@ func (s *CacheSuite) TestDuplicatedBatch() { certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks[1:]) require.NoError(s.T(), err) require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Equal(s.T(), blocks[len(blocks)-1].Header.ParentQC(), certifyingQC) // add same batch again, this has to be rejected as redundant input certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks[1:]) @@ -189,7 +189,7 @@ func (s *CacheSuite) TestDuplicatedBatch() { certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Equal(s.T(), blocks[len(blocks)-1].Header.ParentQC(), certifyingQC) } // TestPruneUpToView tests that blocks lower than pruned height will be properly filtered out from incoming batch. 
@@ -199,7 +199,7 @@ func (s *CacheSuite) TestPruneUpToView() { certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Equal(s.T(), blocks[len(blocks)-1].Header.ParentQC(), certifyingQC) } // TestConcurrentAdd simulates multiple workers adding batches of blocks out of order. diff --git a/engine/common/follower/compliance_core.go b/engine/common/follower/compliance_core.go index 4a58963216c..cd493d59d8d 100644 --- a/engine/common/follower/compliance_core.go +++ b/engine/common/follower/compliance_core.go @@ -312,7 +312,7 @@ func rangeToCertifiedBlocks(certifiedRange []*flow.Block, certifyingQC *flow.Quo for i, block := range certifiedRange { var qc *flow.QuorumCertificate if i < lastIndex { - qc = certifiedRange[i+1].Header.QuorumCertificate() + qc = certifiedRange[i+1].Header.ParentQC() } else { qc = certifyingQC } diff --git a/engine/common/follower/compliance_core_test.go b/engine/common/follower/compliance_core_test.go index 8930d2e19ce..5faab6fbbfe 100644 --- a/engine/common/follower/compliance_core_test.go +++ b/engine/common/follower/compliance_core_test.go @@ -135,7 +135,7 @@ func (s *CoreSuite) TestProcessingRangeHappyPath() { var wg sync.WaitGroup wg.Add(len(blocks) - 1) for i := 1; i < len(blocks); i++ { - s.state.On("ExtendCertified", mock.Anything, blocks[i-1], blocks[i].Header.QuorumCertificate()).Return(nil).Once() + s.state.On("ExtendCertified", mock.Anything, blocks[i-1], blocks[i].Header.ParentQC()).Return(nil).Once() s.follower.On("AddCertifiedBlock", blockWithID(blocks[i-1].ID())).Run(func(args mock.Arguments) { wg.Done() }).Return().Once() diff --git a/engine/common/follower/compliance_engine.go b/engine/common/follower/compliance_engine.go index 1e1a26f6452..0d5fd1f8f09 100644 --- a/engine/common/follower/compliance_engine.go +++ b/engine/common/follower/compliance_engine.go @@ -127,7 +127,15 @@ func NewComplianceLayer( headers: headers, core: core, } - e.finalizedBlockTracker.Track(model.BlockFromFlow(finalized)) + + var block *model.Block + if finalized.ContainsParentQC() { + block = model.BlockFromFlow(finalized) + } else { + block = model.GenesisBlockFromFlow(finalized) + } + + e.finalizedBlockTracker.Track(block) for _, apply := range opts { apply(e) diff --git a/engine/common/follower/integration_test.go b/engine/common/follower/integration_test.go index fd5d4fa43ad..69e5ee0b2a7 100644 --- a/engine/common/follower/integration_test.go +++ b/engine/common/follower/integration_test.go @@ -28,7 +28,9 @@ import ( pbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/util" + "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -45,6 +47,7 @@ func TestFollowerHappyPath(t *testing.T) { allIdentities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(allIdentities) unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := unittest.Logger() @@ -54,7 +57,8 @@ func TestFollowerHappyPath(t *testing.T) { // bootstrap root snapshot state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + 
lockManager, all.Headers, all.Seals, all.Results, @@ -81,7 +85,7 @@ func TestFollowerHappyPath(t *testing.T) { mockTimer, ) require.NoError(t, err) - finalizer := moduleconsensus.NewFinalizer(db, all.Headers, followerState, tracer) + finalizer := moduleconsensus.NewFinalizer(badgerimpl.ToDB(db).Reader(), all.Headers, followerState, tracer) rootHeader, err := rootSnapshot.Head() require.NoError(t, err) rootQC, err := rootSnapshot.QuorumCertificate() diff --git a/engine/common/follower/pending_tree/pending_tree_test.go b/engine/common/follower/pending_tree/pending_tree_test.go index 66b53058b23..420c8a391b3 100644 --- a/engine/common/follower/pending_tree/pending_tree_test.go +++ b/engine/common/follower/pending_tree/pending_tree_test.go @@ -36,7 +36,7 @@ func (s *PendingTreeSuite) SetupTest() { // Having: F ← B1 ← B2 ← B3 // Add [B1, B2, B3], expect to get [B1;QC_B1, B2;QC_B2; B3;QC_B3] func (s *PendingTreeSuite) TestBlocksConnectToFinalized() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks, connectedBlocks) @@ -47,7 +47,7 @@ func (s *PendingTreeSuite) TestBlocksConnectToFinalized() { // Having: F ← B1 ← B2 ← B3 // Add [B2, B3], expect to get [] func (s *PendingTreeSuite) TestBlocksAreNotConnectedToFinalized() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[1:]) require.NoError(s.T(), err) require.Empty(s.T(), connectedBlocks) @@ -59,7 +59,7 @@ func (s *PendingTreeSuite) TestBlocksAreNotConnectedToFinalized() { // Add [B3, B4, B5], expect to get [] // Add [B1, B2], expect to get [B1, B2, B3, B4, B5] func (s *PendingTreeSuite) TestInsertingMissingBlockToFinalized() { - blocks := certifiedBlocksFixture(5, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 5, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[len(blocks)-3:]) require.NoError(s.T(), err) require.Empty(s.T(), connectedBlocks) @@ -81,14 +81,14 @@ func (s *PendingTreeSuite) TestInsertingMissingBlockToFinalized() { // Add [B4, B5, B6, B7], expect to get [] // Add [B1], expect to get [B1, B2, B3, B4, B5, B6, B7] func (s *PendingTreeSuite) TestAllConnectedForksAreCollected() { - longestFork := certifiedBlocksFixture(5, s.finalized) + longestFork := certifiedBlocksFixture(s.T(), 5, s.finalized) B2 := unittest.BlockWithParentFixture(longestFork[0].Block.Header) // make sure short fork doesn't have conflicting views, so we don't trigger exception B2.Header.View = longestFork[len(longestFork)-1].Block.Header.View + 1 B3 := unittest.BlockWithParentFixture(B2.Header) shortFork := []flow.CertifiedBlock{{ Block: B2, - CertifyingQC: B3.Header.QuorumCertificate(), + CertifyingQC: B3.Header.ParentQC(), }, certifiedBlockFixture(B3)} connectedBlocks, err := s.pendingTree.AddBlocks(shortFork) @@ -106,7 +106,7 @@ func (s *PendingTreeSuite) TestAllConnectedForksAreCollected() { // TestAddingConnectedBlocks tests that adding blocks that were already reported as connected is no-op. 
func (s *PendingTreeSuite) TestAddingConnectedBlocks() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks, connectedBlocks) @@ -141,7 +141,7 @@ func (s *PendingTreeSuite) TestByzantineThresholdExceeded() { // Randomly shuffle [B, C, D, E] and add it as single batch, expect [] connected blocks. // Insert [A], expect [A, B, C, D, E] connected blocks. func (s *PendingTreeSuite) TestBatchWithSkipsAndInRandomOrder() { - blocks := certifiedBlocksFixture(5, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 5, s.finalized) rand.Shuffle(len(blocks)-1, func(i, j int) { blocks[i+1], blocks[j+1] = blocks[j+1], blocks[i+1] @@ -172,14 +172,14 @@ func (s *PendingTreeSuite) TestBatchWithSkipsAndInRandomOrder() { // Add [B5, B6, B7], expect to get [] // Finalize B4, expect to get [B5, B6, B7] func (s *PendingTreeSuite) TestResolveBlocksAfterFinalization() { - longestFork := certifiedBlocksFixture(5, s.finalized) + longestFork := certifiedBlocksFixture(s.T(), 5, s.finalized) B2 := unittest.BlockWithParentFixture(longestFork[0].Block.Header) // make sure short fork doesn't have conflicting views, so we don't trigger exception B2.Header.View = longestFork[len(longestFork)-1].Block.Header.View + 1 B3 := unittest.BlockWithParentFixture(B2.Header) shortFork := []flow.CertifiedBlock{{ Block: B2, - CertifyingQC: B3.Header.QuorumCertificate(), + CertifyingQC: B3.Header.ParentQC(), }, certifiedBlockFixture(B3)} connectedBlocks, err := s.pendingTree.AddBlocks(shortFork) @@ -213,7 +213,7 @@ func (s *PendingTreeSuite) TestBlocksLowerThanFinalizedView() { // Finalize A. // Adding [A, B, C, D] returns [D] since A is already finalized, [B, C] are already stored and connected to the finalized state. func (s *PendingTreeSuite) TestAddingBlockAfterFinalization() { - blocks := certifiedBlocksFixture(4, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 4, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[:3]) require.NoError(s.T(), err) @@ -257,11 +257,11 @@ func (s *PendingTreeSuite) TestAddingBlocksWithSameHeight() { } // certifiedBlocksFixture builds a chain of certified blocks starting at some block. -func certifiedBlocksFixture(count int, parent *flow.Header) []flow.CertifiedBlock { +func certifiedBlocksFixture(t *testing.T, count int, parent *flow.Header) []flow.CertifiedBlock { result := make([]flow.CertifiedBlock, 0, count) blocks := unittest.ChainFixtureFrom(count, parent) for i := 0; i < count-1; i++ { - certBlock, err := flow.NewCertifiedBlock(blocks[i], blocks[i+1].Header.QuorumCertificate()) + certBlock, err := flow.NewCertifiedBlock(blocks[i], blocks[i+1].Header.ParentQC()) if err != nil { // this should never happen, as we are specifically constructing a certifying QC for the input block panic(fmt.Sprintf("unexpected error constructing certified block: %s", err.Error())) diff --git a/engine/common/rpc/convert/blocks.go b/engine/common/rpc/convert/blocks.go index 6e3588090ea..147f83e1d2c 100644 --- a/engine/common/rpc/convert/blocks.go +++ b/engine/common/rpc/convert/blocks.go @@ -100,17 +100,26 @@ func BlockSealToMessage(s *flow.Seal) *entities.BlockSeal { } // MessageToBlockSeal converts a protobuf BlockSeal message to a flow.Seal. +// +// All errors indicate the input cannot be converted to a valid seal. 
func MessageToBlockSeal(m *entities.BlockSeal) (*flow.Seal, error) { finalState, err := MessageToStateCommitment(m.FinalState) if err != nil { return nil, fmt.Errorf("failed to convert message to block seal: %w", err) } - return &flow.Seal{ - BlockID: MessageToIdentifier(m.BlockId), - ResultID: MessageToIdentifier(m.ResultId), - FinalState: finalState, - AggregatedApprovalSigs: MessagesToAggregatedSignatures(m.AggregatedApprovalSigs), - }, nil + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: MessageToIdentifier(m.BlockId), + ResultID: MessageToIdentifier(m.ResultId), + FinalState: finalState, + AggregatedApprovalSigs: MessagesToAggregatedSignatures(m.AggregatedApprovalSigs), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct seal: %w", err) + } + + return seal, nil } // BlockSealsToMessages converts a slice of flow.Seal to a slice of protobuf BlockSeal messages. diff --git a/engine/common/rpc/convert/headers.go b/engine/common/rpc/convert/headers.go index b45686c853d..deb8dbdc233 100644 --- a/engine/common/rpc/convert/headers.go +++ b/engine/common/rpc/convert/headers.go @@ -66,18 +66,30 @@ func MessageToBlockHeader(m *entities.BlockHeader) (*flow.Header, error) { if newestQC == nil { return nil, fmt.Errorf("invalid structure newest QC should be present") } - lastViewTC = &flow.TimeoutCertificate{ - View: m.LastViewTc.View, - NewestQCViews: m.LastViewTc.HighQcViews, - SignerIndices: m.LastViewTc.SignerIndices, - SigData: m.LastViewTc.SigData, - NewestQC: &flow.QuorumCertificate{ - View: newestQC.View, - BlockID: MessageToIdentifier(newestQC.BlockId), - SignerIndices: newestQC.SignerIndices, - SigData: newestQC.SigData, + + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ + View: newestQC.View, + BlockID: MessageToIdentifier(newestQC.BlockId), + SignerIndices: newestQC.SignerIndices, + SigData: newestQC.SigData, + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate{ + View: m.LastViewTc.View, + NewestQCViews: m.LastViewTc.HighQcViews, + NewestQC: qc, + SignerIndices: m.LastViewTc.SignerIndices, + SigData: m.LastViewTc.SigData, }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) } + lastViewTC = tc } return &flow.Header{ diff --git a/engine/common/rpc/execution_node_identities_provider.go b/engine/common/rpc/execution_node_identities_provider.go index 5e70344941e..a39d60e35d1 100644 --- a/engine/common/rpc/execution_node_identities_provider.go +++ b/engine/common/rpc/execution_node_identities_provider.go @@ -22,6 +22,10 @@ const maxAttemptsForExecutionReceipt = 3 // MaxNodesCnt is the maximum number of nodes that will be contacted to complete an API request. const MaxNodesCnt = 3 +// ErrNoENsFoundForExecutionResult is returned when no execution nodes were found that produced +// the requested execution result and matches all operator's criteria. +var ErrNoENsFoundForExecutionResult = fmt.Errorf("no execution nodes found for execution result") + // ExecutionNodeIdentitiesProvider is a container for elements required to retrieve // execution node identities for a given block ID. type ExecutionNodeIdentitiesProvider struct { @@ -64,7 +68,11 @@ func NewExecutionNodeIdentitiesProvider( // ExecutionNodesForBlockID returns upto maxNodesCnt number of randomly chosen execution node identities // which have executed the given block ID. 
-// If no such execution node is found, an InsufficientExecutionReceipts error is returned. +// +// Expected errors during normal operations: +// - InsufficientExecutionReceipts - if no such execution node is found. +// - ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced +// the provided execution result and matched the operator's criteria. func (e *ExecutionNodeIdentitiesProvider) ExecutionNodesForBlockID( ctx context.Context, blockID flow.Identifier, @@ -131,7 +139,63 @@ func (e *ExecutionNodeIdentitiesProvider) ExecutionNodesForBlockID( } if len(subsetENs) == 0 { - return nil, fmt.Errorf("no matching execution node found for block ID %v", blockID) + return nil, ErrNoENsFoundForExecutionResult + } + + return subsetENs, nil +} + +// ExecutionNodesForResultID returns execution node identities that produced receipts +// for the specific execution result ID within the given block. +// +// Expected errors during normal operations: +// - ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced +// the provided execution result and matched the operator's criteria. +func (e *ExecutionNodeIdentitiesProvider) ExecutionNodesForResultID( + blockID flow.Identifier, + resultID flow.Identifier, +) (flow.IdentitySkeletonList, error) { + var executorIDs flow.IdentifierList + rootBlock := e.state.Params().FinalizedRoot() + + // if block is a root block, don't look for execution receipts as there are none for the root block. + if rootBlock.ID() == blockID { + executorIdentities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve execution IDs for block ID %v: %w", blockID, err) + } + + executorIDs = append(executorIDs, executorIdentities.NodeIDs()...)
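The new `ErrNoENsFoundForExecutionResult` sentinel lets callers classify a missing executor set according to their own context, in line with the error-classification conventions used in this codebase. Below is a self-contained sketch of how a caller might treat it as benign and fall back; the sentinel is redefined locally so the snippet compiles on its own, and `findExecutors`/`executorsOrFallback` are hypothetical helpers, not functions from this diff:

```go
package main

import (
	"errors"
	"fmt"
)

// Local mirror of the sentinel introduced by this change, so the sketch is self-contained.
var ErrNoENsFoundForExecutionResult = errors.New("no execution nodes found for execution result")

// findExecutors is a stand-in for ExecutionNodesForResultID.
func findExecutors(haveReceipts bool) ([]string, error) {
	if !haveReceipts {
		return nil, ErrNoENsFoundForExecutionResult
	}
	return []string{"en-1", "en-2"}, nil
}

// In a context where an empty executor set is expected (e.g. the receipt has not
// propagated yet), the sentinel is benign and the caller can fall back; any other
// error is treated as an exception.
func executorsOrFallback(haveReceipts bool, fallback []string) ([]string, error) {
	ens, err := findExecutors(haveReceipts)
	if errors.Is(err, ErrNoENsFoundForExecutionResult) {
		return fallback, nil // benign in this caller's context
	}
	if err != nil {
		return nil, fmt.Errorf("unexpected failure looking up executors: %w", err)
	}
	return ens, nil
}

func main() {
	ens, _ := executorsOrFallback(false, []string{"en-fallback"})
	fmt.Println(ens)
}
```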
+ } else { + allReceipts, err := e.executionReceipts.ByBlockID(blockID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve execution receipts for block ID %v: %w", blockID, err) + } + + executionReceiptMetaList := make(flow.ExecutionReceiptMetaList, 0, len(allReceipts)) + for _, r := range allReceipts { + executionReceiptMetaList = append(executionReceiptMetaList, r.Meta()) + } + + receiptsByResultID := executionReceiptMetaList.GroupByResultID() + targetReceipts := receiptsByResultID.GetGroup(resultID) + + if len(targetReceipts) == 0 { + return nil, fmt.Errorf("no execution receipts found for result ID %v in block %v", resultID, blockID) + } + + for _, receipt := range targetReceipts { + executorIDs = append(executorIDs, receipt.ExecutorID) + } + } + + subsetENs, err := e.chooseExecutionNodes(executorIDs) + if err != nil { + return nil, fmt.Errorf("failed to retrieve execution IDs for result ID %v: %w", resultID, err) + } + + if len(subsetENs) == 0 { + return nil, ErrNoENsFoundForExecutionResult } return subsetENs, nil diff --git a/engine/common/rpc/execution_node_identities_provider_test.go b/engine/common/rpc/execution_node_identities_provider_test.go index 2b033e3dac0..f08a1f9192a 100644 --- a/engine/common/rpc/execution_node_identities_provider_test.go +++ b/engine/common/rpc/execution_node_identities_provider_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" protocol "github.com/onflow/flow-go/state/protocol/mock" @@ -125,7 +125,7 @@ func (suite *ENIdentitiesProviderSuite) TestExecutionNodesForBlockID() { allExecNodes, err := execNodeIdentitiesProvider.ExecutionNodesForBlockID(context.Background(), block.ID()) require.NoError(suite.T(), err) - execNodeSelectorFactory := backend.NewNodeSelectorFactory(false) + execNodeSelectorFactory := node_communicator.NewNodeSelectorFactory(false) execSelector, err := execNodeSelectorFactory.SelectNodes(allExecNodes) require.NoError(suite.T(), err) @@ -164,7 +164,7 @@ func (suite *ENIdentitiesProviderSuite) TestExecutionNodesForBlockID() { allExecNodes, err := execNodeIdentitiesProvider.ExecutionNodesForBlockID(context.Background(), block.ID()) require.NoError(suite.T(), err) - execNodeSelectorFactory := backend.NewNodeSelectorFactory(false) + execNodeSelectorFactory := node_communicator.NewNodeSelectorFactory(false) execSelector, err := execNodeSelectorFactory.SelectNodes(allExecNodes) require.NoError(suite.T(), err) diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index aeae8eb7a8d..5d7e1ebfbcf 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -425,6 +425,43 @@ func (e *Engine) sendRequests(participants flow.IdentifierList, ranges []chainsy e.metrics.MessageSent(metrics.EngineSynchronization, metrics.MessageRangeRequest) } + blockIDs := []string{ + "26a8fbb45d9450cae9ee5609c84a8c8dac56c3438c91b218e215446f889d23d8", // 130290664 + "95982c73effa353324e299e4cb6d374cb2c9adc8d951593b1432c643f5785c25", // 130290663 + "177c731b4656c52188e1bedbb187eb21da921e524e4e9745a1e7ae2f6815aba5", // 130290662 + "b6f4ddd1ea1afd02571341614f90f4765625d5383f9197ed98da3d943644af8b", // 130290661 + "5b7679b7f86197ce4a4cfcca7fd4926d8cad4a0d0369f07613d9960c1f30a533", // 130290660 + 
"40166922ae42fd8e269b0b7bdb11137f50954450ffa32ec3727128149ec74e34", // 130290659 + "28d657bc8c6b0608e4c7caef3fb37b5b05f58c7bfedd1b8e556bd0940735e9cd", // 130290658 + "7cc6e59b0dd3418e02cf5afbb4b2d22356e46ba1b514e31cf90b7e8cab4d0bec", // 130290657 + "339ca0532dd44188f94d9e7e2baf4dfe81055b4036a4c5ce931e356055a8965c", // 130290656 + "8166e6f01e71513782a2d004702759f99ce6b758402767559815f98d44a3b85a", // 130290655 + "ecf8b6bb193ea368b606913d5dead98fe8cbebca66aa5869682f2cd13c14c5f7", // 130290654 + "16f0c55dfb76f53ed7492798a63e2860b7438c8c330515fa084dfae5ad8d3779", // 130290653 + "a00b9c955916ae25c8f96b259a79a4171af323dfa9abf5c32bd9788e7b1d9ce0", // 130290652 + "86013212dace29b0c454998ec134f84131ae39ac19c3ddd0c5e5647aa169f98e", // 130290651 + "9a53bba124f7cbeb526283fb9cedcf37ad9fd8f818d8c5e95ced799b4158a5c6", // 130290650 + "18d48b81e195f74556eeb7f8e898f617b64279f16485e1909cb4450295ef6010", // 130290649 + "3cce19c4271a1b95305c41d5d892557b6e6e07c936e1db723a9f032099f49944", // 130290648 + "5a754af07cf5f43db3d34d26f83d3b32a4d7003df2d470ea062b4ad3730a592d", // 130290647 + "f6aa413316f33f3f46079136c5a3edaf90ee66712b97e6b452c342222e83704d", // 130290646 + "69ac0584fb6f2daa731515af2bf248d0f5789f02b1f7f398d00d710541eaa9e8", // 130290645 + "b22bfb09edc573eb04cdd6f5df83d2c62b04b1f92a38e5e9b4ded8ed6257893c", // 130290644 + "e3b62139cd71090949ac7dcb4c9ee9fac1e392c6dd55695a0da92cb78f7ce1ca", // 130290643 + } + blocksIDs := make([]flow.Identifier, len(blockIDs)) + for i, blockID := range blockIDs { + block, err := flow.HexStringToIdentifier(blockID) + if err != nil { + e.log.Fatal().Err(err).Msg("failed to parse block ID") + } + blocksIDs[i] = block + } + + batches = append(batches, chainsync.Batch{ + BlockIDs: blocksIDs, + }) + for _, batch := range batches { nonce, err := rand.Uint64() if err != nil { @@ -444,7 +481,7 @@ func (e *Engine) sendRequests(participants flow.IdentifierList, ranges []chainsy errs = multierror.Append(errs, fmt.Errorf("could not submit batch request: %w", err)) continue } - e.log.Debug(). + e.log.Info(). Strs("block_ids", flow.IdentifierList(batch.BlockIDs).Strings()). Uint64("range_nonce", req.Nonce). Msg("batch requested") diff --git a/engine/common/version/version_control.go b/engine/common/version/version_control.go index f664738fda2..41a1d40c4be 100644 --- a/engine/common/version/version_control.go +++ b/engine/common/version/version_control.go @@ -50,6 +50,7 @@ var defaultCompatibilityOverrides = map[string]struct{}{ "0.41.0": {}, // mainnet, testnet "0.41.4": {}, // mainnet, testnet "0.42.0": {}, // mainnet, testnet + "0.42.1": {}, // mainnet, testnet } // VersionControl manages the version control system for the node. diff --git a/engine/consensus/approvals/approval_collector.go b/engine/consensus/approvals/approval_collector.go index 8d591093160..03447ec91ba 100644 --- a/engine/consensus/approvals/approval_collector.go +++ b/engine/consensus/approvals/approval_collector.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" ) @@ -100,6 +101,8 @@ func (c *ApprovalCollector) IncorporatedResult() *flow.IncorporatedResult { return c.incorporatedResult } +// SealResult generates and stores the seal into the mempool. +// No errors are expected during normal operation. 
func (c *ApprovalCollector) SealResult() error { // get final state of execution result finalState, err := c.incorporatedResult.Result.FinalStateCommitment() @@ -111,11 +114,16 @@ func (c *ApprovalCollector) SealResult() error { // TODO: Check SPoCK proofs // generate & store seal - seal := &flow.Seal{ - BlockID: c.incorporatedResult.Result.BlockID, - ResultID: c.incorporatedResult.Result.ID(), - FinalState: finalState, - AggregatedApprovalSigs: c.aggregatedSignatures.Collect(), + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: c.incorporatedResult.Result.BlockID, + ResultID: c.incorporatedResult.Result.ID(), + FinalState: finalState, + AggregatedApprovalSigs: c.aggregatedSignatures.Collect(), + }, + ) + if err != nil { + return irrecoverable.NewExceptionf("could not construct seal : %w", err) } // Adding a seal that already exists in the mempool is a NoOp. But to reduce log diff --git a/engine/consensus/approvals/testutil.go b/engine/consensus/approvals/testutil.go index d590bbaf57f..020d996c453 100644 --- a/engine/consensus/approvals/testutil.go +++ b/engine/consensus/approvals/testutil.go @@ -72,9 +72,12 @@ func (s *BaseApprovalsTestSuite) SetupTest() { s.IncorporatedBlock = unittest.BlockHeaderWithParentFixture(s.Block) // compose incorporated result - s.IncorporatedResult = unittest.IncorporatedResult.Fixture( - unittest.IncorporatedResult.WithResult(result), - unittest.IncorporatedResult.WithIncorporatedBlockID(s.IncorporatedBlock.ID())) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: s.IncorporatedBlock.ID(), + Result: result, + }) + require.NoError(s.T(), err) + s.IncorporatedResult = incorporatedResult } // BaseAssignmentCollectorTestSuite is a base suite for testing assignment collectors, contains mocks for all diff --git a/engine/consensus/dkg/reactor_engine_test.go b/engine/consensus/dkg/reactor_engine_test.go index b6612616c69..e28c291f20f 100644 --- a/engine/consensus/dkg/reactor_engine_test.go +++ b/engine/consensus/dkg/reactor_engine_test.go @@ -114,7 +114,7 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { // expectedPrivKey is the expected private share produced by the dkg run. We // will mock the controller to return this value, and we will check it // against the value that gets inserted in the DB at the end. 
- suite.expectedPrivateKey = unittest.PrivateKeyFixture(crypto.BLSBLS12381, 48) + suite.expectedPrivateKey = unittest.PrivateKeyFixture(crypto.BLSBLS12381) // mock protocol state suite.currentEpoch = new(protocol.CommittedEpoch) diff --git a/engine/consensus/message_hub/message_hub.go b/engine/consensus/message_hub/message_hub.go index cbc1fc03b61..a545e9bc926 100644 --- a/engine/consensus/message_hub/message_hub.go +++ b/engine/consensus/message_hub/message_hub.go @@ -389,23 +389,21 @@ func (h *MessageHub) provideProposal(proposal *messages.BlockProposal, recipient // OnOwnVote propagates the vote to relevant recipient(s): // - [common case] vote is queued and is sent via unicast to another node that is the next leader by worker // - [special case] this node is the next leader: vote is directly forwarded to the node's internal `VoteAggregator` -func (h *MessageHub) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - vote := &messages.BlockVote{ - BlockID: blockID, - View: view, - SigData: sigData, - } - +func (h *MessageHub) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { // special case: I am the next leader if recipientID == h.me.NodeID() { - h.forwardToOwnVoteAggregator(vote, h.me.NodeID()) // forward vote to my own `voteAggregator` + h.forwardToOwnVoteAggregator(vote) // forward vote to my own `voteAggregator` return } // common case: someone else is leader packed := &packedVote{ recipientID: recipientID, - vote: vote, + vote: &messages.BlockVote{ + BlockID: vote.BlockID, + View: vote.View, + SigData: vote.SigData, + }, } if ok := h.ownOutboundVotes.Push(packed); ok { h.ownOutboundMessageNotifier.Notify() @@ -463,15 +461,35 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, Message: msg, }) case *messages.BlockVote: - h.forwardToOwnVoteAggregator(msg, originID) + vote, err := model.NewVote(model.UntrustedVote{ + View: msg.View, + BlockID: msg.BlockID, + SignerID: originID, + SigData: msg.SigData, + }) + if err != nil { + h.log.Warn(). + Hex("origin_id", originID[:]). + Hex("block_id", msg.BlockID[:]). + Uint64("view", msg.View). + Err(err).Msgf("received invalid vote message") + return err + } + + h.forwardToOwnVoteAggregator(vote) case *messages.TimeoutObject: - t := &model.TimeoutObject{ - View: msg.View, - NewestQC: msg.NewestQC, - LastViewTC: msg.LastViewTC, - SignerID: originID, - SigData: msg.SigData, - TimeoutTick: msg.TimeoutTick, + t, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: msg.View, + NewestQC: msg.NewestQC, + LastViewTC: msg.LastViewTC, + SignerID: originID, + SigData: msg.SigData, + TimeoutTick: msg.TimeoutTick, + }, + ) + if err != nil { + return fmt.Errorf("could not construct timeout object: %w", err) } h.forwardToOwnTimeoutAggregator(t) default: @@ -487,21 +505,15 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, // forwardToOwnVoteAggregator converts vote to generic `model.Vote`, logs vote and forwards it to own `voteAggregator`. // Per API convention, timeoutAggregator` is non-blocking, hence, this call returns quickly. -func (h *MessageHub) forwardToOwnVoteAggregator(vote *messages.BlockVote, originID flow.Identifier) { +func (h *MessageHub) forwardToOwnVoteAggregator(vote *model.Vote) { h.engineMetrics.MessageReceived(metrics.EngineConsensusMessageHub, metrics.MessageBlockVote) - v := &model.Vote{ - View: vote.View, - BlockID: vote.BlockID, - SignerID: originID, - SigData: vote.SigData, - } h.log.Debug(). 
- Uint64("block_view", v.View). - Hex("block_id", v.BlockID[:]). - Hex("voter", v.SignerID[:]). - Str("vote_id", v.ID().String()). + Uint64("block_view", vote.View). + Hex("block_id", vote.BlockID[:]). + Hex("voter", vote.SignerID[:]). + Str("vote_id", vote.ID().String()). Msg("block vote received, forwarding block vote to hotstuff vote aggregator") - h.voteAggregator.AddVote(v) + h.voteAggregator.AddVote(vote) } // forwardToOwnTimeoutAggregator logs timeout and forwards it to own `timeoutAggregator`. diff --git a/engine/consensus/message_hub/message_hub_test.go b/engine/consensus/message_hub/message_hub_test.go index e5cd47ca1c1..ab6ef79efd9 100644 --- a/engine/consensus/message_hub/message_hub_test.go +++ b/engine/consensus/message_hub/message_hub_test.go @@ -289,7 +289,7 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { }).Return(nil) // submit vote - s.hub.OnOwnVote(vote.BlockID, vote.View, vote.SigData, recipientID) + s.hub.OnOwnVote(vote, recipientID) }) s.Run("timeout", func() { wg.Add(1) diff --git a/engine/consensus/sealing/core.go b/engine/consensus/sealing/core.go index 69686187cbf..617c57e240e 100644 --- a/engine/consensus/sealing/core.go +++ b/engine/consensus/sealing/core.go @@ -166,7 +166,13 @@ func (c *Core) RepopulateAssignmentCollectorTree(payloads storage.Payloads) erro Msg("skipping outdated block referenced in root sealing segment") continue } - incorporatedResult := flow.NewIncorporatedResult(blockID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block (%x): %w", blockID, err) + } err = c.ProcessIncorporatedResult(incorporatedResult) if err != nil { return fmt.Errorf("could not process incorporated result from block %s: %w", blockID, err) diff --git a/engine/consensus/sealing/engine.go b/engine/consensus/sealing/engine.go index 352a0e423f3..0d8ad38de07 100644 --- a/engine/consensus/sealing/engine.go +++ b/engine/consensus/sealing/engine.go @@ -485,7 +485,13 @@ func (e *Engine) processIncorporatedBlock(incorporatedBlockID flow.Identifier) e return fmt.Errorf("could not retrieve receipt incorporated in block %v: %w", incorporatedBlock.ParentID, err) } - incorporatedResult := flow.NewIncorporatedResult(incorporatedBlock.ParentID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: incorporatedBlock.ParentID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block (%x): %w", incorporatedBlock.ParentID, err) + } added := e.pendingIncorporatedResults.Push(incorporatedResult) if !added { // Not being able to queue an incorporated result is a fatal edge case. 
It might happen, if the diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index f83ada42144..9f710c1a628 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -124,7 +124,11 @@ func (s *SealingEngineSuite) TestOnBlockIncorporated() { index.ResultIDs = append(index.ReceiptIDs, result.ID()) s.results.On("ByID", result.ID()).Return(result, nil).Once() - IR := flow.NewIncorporatedResult(parentBlock.ID(), result) + IR, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: parentBlock.ID(), + Result: result, + }) + require.NoError(s.T(), err) s.core.On("ProcessIncorporatedResult", IR).Return(nil).Once() } s.index.On("ByBlockID", parentBlock.ID()).Return(index, nil) diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index e38087cfb04..08e4187fc01 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -105,10 +105,13 @@ type BlockComputer interface { type blockComputer struct { vm fvm.VM vmCtx fvm.Context + systemChunkCtx fvm.Context + callbackCtx fvm.Context metrics module.ExecutionMetrics tracer module.Tracer log zerolog.Logger - systemChunkCtx fvm.Context + systemTxn *flow.TransactionBody + processCallbackTxn *flow.TransactionBody committer ViewCommitter executionDataProvider provider.Provider signer module.Local @@ -119,23 +122,35 @@ type blockComputer struct { maxConcurrency int } +// SystemChunkContext is the context for the system chunk transaction. func SystemChunkContext(vmCtx fvm.Context, metrics module.ExecutionMetrics) fvm.Context { return fvm.NewContextFromParent( vmCtx, - fvm.WithContractDeploymentRestricted(false), - fvm.WithContractRemovalRestricted(false), fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithTransactionFeesEnabled(false), + fvm.WithMetricsReporter(metrics), + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(false), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), fvm.WithMemoryAndInteractionLimitsDisabled(), // only the system transaction is allowed to call the block entropy provider fvm.WithRandomSourceHistoryCallAllowed(true), - fvm.WithMetricsReporter(metrics), fvm.WithAccountStorageLimit(false), ) } +// CallbackContext is the context for the scheduled callback transactions. +func CallbackContext(vmCtx fvm.Context, metrics module.ExecutionMetrics) fvm.Context { + return fvm.NewContextFromParent( + vmCtx, + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithTransactionFeesEnabled(false), + fvm.WithMetricsReporter(metrics), + ) +} + // NewBlockComputer creates a new block executor. 
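`SystemChunkContext` and `CallbackContext` are both derived from the block computer's base context via `fvm.NewContextFromParent` plus a handful of `With...` options, differing only in which checks and limits are relaxed. A minimal sketch of that derive-from-parent options pattern, using made-up option names rather than the real `fvm` API:

```go
package main

import "fmt"

// execContext is a toy stand-in for an execution context's tunable settings.
type execContext struct {
	AuthorizationChecks bool
	SequenceNumberCheck bool
	TransactionFees     bool
	MemoryLimits        bool
}

type option func(*execContext)

func withAuthorizationChecks(enabled bool) option { return func(c *execContext) { c.AuthorizationChecks = enabled } }
func withSequenceNumberCheck(enabled bool) option { return func(c *execContext) { c.SequenceNumberCheck = enabled } }
func withTransactionFees(enabled bool) option     { return func(c *execContext) { c.TransactionFees = enabled } }
func withMemoryLimits(enabled bool) option        { return func(c *execContext) { c.MemoryLimits = enabled } }

// newContextFromParent copies the parent and applies overrides, so derived
// contexts never mutate the parent.
func newContextFromParent(parent execContext, opts ...option) execContext {
	child := parent
	for _, opt := range opts {
		opt(&child)
	}
	return child
}

func main() {
	base := execContext{AuthorizationChecks: true, SequenceNumberCheck: true, TransactionFees: true, MemoryLimits: true}
	systemChunk := newContextFromParent(base,
		withAuthorizationChecks(false),
		withSequenceNumberCheck(false),
		withTransactionFees(false),
		withMemoryLimits(false),
	)
	callback := newContextFromParent(base,
		withAuthorizationChecks(false),
		withSequenceNumberCheck(false),
		withTransactionFees(false),
	)
	fmt.Println(systemChunk, callback, base)
}
```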
func NewBlockComputer( vm fvm.VM, @@ -160,18 +175,28 @@ func NewBlockComputer( return nil, fmt.Errorf("program cache writes are not allowed in scripts on Execution nodes") } - systemChunkCtx := SystemChunkContext(vmCtx, metrics) vmCtx = fvm.NewContextFromParent( vmCtx, fvm.WithMetricsReporter(metrics), fvm.WithTracer(tracer)) + + systemTxn, err := blueprints.SystemChunkTransaction(vmCtx.Chain) + if err != nil { + return nil, fmt.Errorf("could not build system chunk transaction: %w", err) + } + + processCallbackTxn := blueprints.ProcessCallbacksTransaction(vmCtx.Chain) + return &blockComputer{ vm: vm, vmCtx: vmCtx, + callbackCtx: CallbackContext(vmCtx, metrics), + systemChunkCtx: SystemChunkContext(vmCtx, metrics), metrics: metrics, tracer: tracer, log: logger, - systemChunkCtx: systemChunkCtx, + systemTxn: systemTxn, + processCallbackTxn: processCallbackTxn, committer: committer, executionDataProvider: executionDataProvider, signer: signer, @@ -207,16 +232,26 @@ func (e *blockComputer) ExecuteBlock( return results, nil } -func (e *blockComputer) queueTransactionRequests( +func (e *blockComputer) userTransactionsCount(collections []*entity.CompleteCollection) int { + count := 0 + for _, collection := range collections { + count += len(collection.Transactions) + } + + return count +} + +func (e *blockComputer) queueUserTransactions( blockId flow.Identifier, - blockIdStr string, blockHeader *flow.Header, rawCollections []*entity.CompleteCollection, - systemTxnBody *flow.TransactionBody, - requestQueue chan TransactionRequest, - numTxns int, -) { + userTxCount int, +) chan TransactionRequest { + txQueue := make(chan TransactionRequest, userTxCount) + defer close(txQueue) + txnIndex := uint32(0) + blockIdStr := blockId.String() collectionCtx := fvm.NewContextFromParent( e.vmCtx, @@ -242,7 +277,7 @@ func (e *blockComputer) queueTransactionRequests( } for i, txnBody := range collection.Transactions { - requestQueue <- newTransactionRequest( + txQueue <- newTransactionRequest( collectionInfo, collectionCtx, collectionLogger, @@ -253,46 +288,48 @@ func (e *blockComputer) queueTransactionRequests( } } - systemCtx := fvm.NewContextFromParent( - e.systemChunkCtx, - fvm.WithBlockHeader(blockHeader), - fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(blockId)), - ) - systemCollectionLogger := systemCtx.Logger.With(). - Str("block_id", blockIdStr). - Uint64("height", blockHeader.Height). - Bool("system_chunk", true). - Bool("system_transaction", true). - Int("num_collections", len(rawCollections)). - Int("num_txs", numTxns). 
- Logger() - systemCollectionInfo := collectionInfo{ - blockId: blockId, - blockIdStr: blockIdStr, - blockHeight: blockHeader.Height, - collectionIndex: len(rawCollections), - CompleteCollection: &entity.CompleteCollection{ - Transactions: []*flow.TransactionBody{systemTxnBody}, - }, - isSystemTransaction: true, - } - - requestQueue <- newTransactionRequest( - systemCollectionInfo, - systemCtx, - systemCollectionLogger, - txnIndex, - systemTxnBody, - true) + return txQueue } -func numberOfTransactionsInBlock(collections []*entity.CompleteCollection) int { - numTxns := 1 // there's one system transaction per block - for _, collection := range collections { - numTxns += len(collection.Transactions) +func (e *blockComputer) queueSystemTransactions( + callbackCtx fvm.Context, + systemChunkCtx fvm.Context, + systemCollection collectionInfo, + systemTxn *flow.TransactionBody, + executeCallbackTxs []*flow.TransactionBody, + txnIndex uint32, + systemLogger zerolog.Logger, +) chan TransactionRequest { + allTxs := append(executeCallbackTxs, systemTxn) + // add the execute-callback transactions to the system collection info alongside the existing process transaction + systemTxs := systemCollection.CompleteCollection.Transactions + systemCollection.CompleteCollection.Transactions = append(systemTxs, allTxs...) + systemLogger = systemLogger.With().Uint32("num_txs", uint32(len(systemTxs))).Logger() + + txQueue := make(chan TransactionRequest, len(allTxs)) + defer close(txQueue) + + for i, txBody := range allTxs { + last := i == len(allTxs)-1 + ctx := callbackCtx + // the last transaction is the system chunk and has its own context + if last { + ctx = systemChunkCtx + } + + txQueue <- newTransactionRequest( + systemCollection, + ctx, + systemLogger, + txnIndex, + txBody, + last, + ) + + txnIndex++ } - return numTxns + return txQueue } // selectChunkConstructorForProtocolVersion selects a [flow.Chunk] constructor to
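`queueUserTransactions` and `queueSystemTransactions` both fill a buffered channel sized for every request and close it before any worker reads from it; closing a buffered channel does not drop the queued items, so a fixed pool of workers can simply range over it until it is drained. A small stand-alone sketch of that pattern (the `request`, `queueRequests`, and `executeQueue` names are illustrative, not the diff's):

```go
package main

import (
	"fmt"
	"sync"
)

type request struct{ index int }

// queueRequests fills a buffered channel and closes it before returning;
// the queued items remain readable after close.
func queueRequests(n int) chan request {
	queue := make(chan request, n) // buffer sized to hold every request
	defer close(queue)
	for i := 0; i < n; i++ {
		queue <- request{index: i}
	}
	return queue
}

// executeQueue drains the closed channel with a fixed number of workers.
func executeQueue(queue chan request, maxConcurrency int) {
	var wg sync.WaitGroup
	wg.Add(maxConcurrency)
	for w := 0; w < maxConcurrency; w++ {
		go func(worker int) {
			defer wg.Done()
			for req := range queue { // range ends once the buffer is empty
				fmt.Printf("worker %d executed request %d\n", worker, req.index)
			}
		}(w)
	}
	wg.Wait()
}

func main() {
	executeQueue(queueRequests(5), 2)
}
```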
// TODO(mainnet27, #6773): remove this https://github.com/onflow/flow-go/issues/6773 - versionedChunkConstructor, err := e.selectChunkConstructorForProtocolVersion(blockId) + versionedChunkConstructor, err := e.selectChunkConstructorForProtocolVersion(block.ID()) if err != nil { return nil, fmt.Errorf("could not select chunk constructor for current protocol version: %w", err) } @@ -370,45 +396,38 @@ func (e *blockComputer) executeBlock( e.receiptHasher, parentBlockExecutionResultID, block, - numTxns, + e.maxConcurrency*2, // we add some buffer just in case result collection becomes slower than the execution e.colResCons, baseSnapshot, versionedChunkConstructor, ) defer collector.Stop() - requestQueue := make(chan TransactionRequest, numTxns) - database := newTransactionCoordinator( e.vm, baseSnapshot, derivedBlockData, collector) - e.queueTransactionRequests( - blockId, - blockIdStr, - block.Block.Header, + e.executeUserTransactions( + block, + blockSpan, + database, rawCollections, - systemTxn, - requestQueue, - numTxns, + userTxCount, ) - close(requestQueue) - - wg := &sync.WaitGroup{} - wg.Add(e.maxConcurrency) - for i := 0; i < e.maxConcurrency; i++ { - go e.executeTransactions( - blockSpan, - database, - requestQueue, - wg) + err = e.executeSystemTransactions( + block, + blockSpan, + database, + rawCollections, + userTxCount, + ) + if err != nil { + return nil, err } - wg.Wait() - err = database.Error() if err != nil { return nil, err @@ -428,6 +447,186 @@ func (e *blockComputer) executeBlock( return res, nil } +// executeUserTransactions executes the user transactions in the block. +// It queues the user transactions into a request queue and then executes them in parallel. +func (e *blockComputer) executeUserTransactions( + block *entity.ExecutableBlock, + blockSpan otelTrace.Span, + database *transactionCoordinator, + rawCollections []*entity.CompleteCollection, + userTxCount int, +) { + txQueue := e.queueUserTransactions( + block.ID(), + block.Block.Header, + rawCollections, + userTxCount, + ) + + e.executeQueue(blockSpan, database, txQueue) +} + +// executeSystemTransactions executes all system transactions in the block as part of the system collection. +// +// System transactions are executed in the following order: +// 1. system transaction that processes the scheduled callbacks which is a blocking transaction and +// the result is used for the next system transaction +// 2. system transactions that each execute a single scheduled callback by the ID obtained from events +// of the previous system transaction +// 3. system transaction that executes the system chunk +// +// An error can be returned if the process callback transaction fails. This is a fatal error. +func (e *blockComputer) executeSystemTransactions( + block *entity.ExecutableBlock, + blockSpan otelTrace.Span, + database *transactionCoordinator, + rawCollections []*entity.CompleteCollection, + userTxCount int, +) error { + userCollectionCount := len(rawCollections) + txIndex := uint32(userTxCount) + + callbackCtx := fvm.NewContextFromParent( + e.callbackCtx, + fvm.WithBlockHeader(block.Block.Header), + fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(block.ID())), + ) + + systemChunkCtx := fvm.NewContextFromParent( + e.systemChunkCtx, + fvm.WithBlockHeader(block.Block.Header), + fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(block.ID())), + ) + + systemLogger := callbackCtx.Logger.With(). + Str("block_id", block.ID().String()). + Uint64("height", block.Block.Header.Height). 
+ Bool("system_chunk", true). + Bool("system_transaction", true). + Int("num_collections", userCollectionCount). + Logger() + + systemCollectionInfo := collectionInfo{ + blockId: block.ID(), + blockIdStr: block.ID().String(), + blockHeight: block.Block.Header.Height, + collectionIndex: len(rawCollections), + CompleteCollection: &entity.CompleteCollection{}, + isSystemTransaction: true, + } + + var callbackTxs []*flow.TransactionBody + + if e.vmCtx.ScheduleCallbacksEnabled { + callbacks, updatedTxnIndex, err := e.executeProcessCallback( + callbackCtx, + systemCollectionInfo, + database, + blockSpan, + txIndex, + systemLogger, + ) + if err != nil { + return err + } + + callbackTxs = callbacks + txIndex = updatedTxnIndex + } + + txQueue := e.queueSystemTransactions( + callbackCtx, + systemChunkCtx, + systemCollectionInfo, + e.systemTxn, + callbackTxs, + txIndex, + systemLogger, + ) + + e.executeQueue(blockSpan, database, txQueue) + + return nil +} + +// executeQueue executes the transactions in the request queue in parallel with the maxConcurrency workers. +func (e *blockComputer) executeQueue( + blockSpan otelTrace.Span, + database *transactionCoordinator, + txQueue chan TransactionRequest, +) { + wg := &sync.WaitGroup{} + wg.Add(e.maxConcurrency) + + for range e.maxConcurrency { + go e.executeTransactions( + blockSpan, + database, + txQueue, + wg) + } + + wg.Wait() +} + +// executeProcessCallback executes a transaction that calls callback scheduler contract process method. +// The execution result contains events that are emitted for each callback which is ready for execution. +// We use these events to prepare callback execution transactions, which are later executed as part of the system collection. +// An error can be returned if the process callback transaction fails. This is a fatal error. 
+func (e *blockComputer) executeProcessCallback( + systemCtx fvm.Context, + systemCollectionInfo collectionInfo, + database *transactionCoordinator, + blockSpan otelTrace.Span, + txnIndex uint32, + systemLogger zerolog.Logger, +) ([]*flow.TransactionBody, uint32, error) { + // add process callback transaction to the system collection info + systemCollectionInfo.CompleteCollection.Transactions = append(systemCollectionInfo.CompleteCollection.Transactions, e.processCallbackTxn) + + request := newTransactionRequest( + systemCollectionInfo, + systemCtx, + systemLogger, + txnIndex, + e.processCallbackTxn, + false) + + txnIndex++ + + txn, err := e.executeTransactionInternal(blockSpan, database, request, 0) + if err != nil { + snapshotTime := logical.Time(0) + if txn != nil { + snapshotTime = txn.SnapshotTime() + } + + return nil, 0, fmt.Errorf( + "failed to execute %s transaction %v (%d@%d) for block %s at height %v: %w", + "system", + request.txnIdStr, + request.txnIndex, + snapshotTime, + request.blockIdStr, + request.ctx.BlockHeader.Height, + err) + } + + if txn.Output().Err != nil { + return nil, 0, fmt.Errorf( + "process callback transaction %s error: %v", + request.txnIdStr, + txn.Output().Err) + } + + callbackTxs, err := blueprints.ExecuteCallbacksTransactions(e.vmCtx.Chain, txn.Output().Events) + if err != nil { + return nil, 0, err + } + + return callbackTxs, txnIndex, nil +} + func (e *blockComputer) executeTransactions( blockSpan otelTrace.Span, database *transactionCoordinator, diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index 39341b0d802..4403c6a8f5b 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -4,9 +4,14 @@ import ( "context" "fmt" "math/rand" + "strings" + "sync" "sync/atomic" "testing" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" "github.com/onflow/cadence" "github.com/onflow/cadence/common" "github.com/onflow/cadence/encoding/ccf" @@ -14,10 +19,6 @@ import ( "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/sema" "github.com/onflow/cadence/stdlib" - - "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -607,7 +608,6 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { }) t.Run("system chunk transaction should not fail", func(t *testing.T) { - // include all fees. 
System chunk should ignore them contextOptions := []fvm.Option{ fvm.WithEVMEnabled(true), @@ -1596,6 +1596,337 @@ func Test_ExecutingSystemCollection(t *testing.T) { committer.AssertExpectations(t) } +func Test_ScheduledCallback(t *testing.T) { + t.Run("process with no scheduled callback", func(t *testing.T) { + testScheduledCallback(t, []cadence.Event{}, 2) // process callback + system chunk + }) + + t.Run("process with 2 scheduled callbacks", func(t *testing.T) { + // create callback events that process callback will return + location := common.NewAddressLocation(nil, common.Address(flow.HexToAddress("0x0000000000000000")), "CallbackScheduler") + + eventType := cadence.NewEventType( + location, + "CallbackProcessed", + []cadence.Field{ + {Identifier: "ID", Type: cadence.UInt64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + }, + nil, + ) + + callbackID1 := uint64(1) + callbackID2 := uint64(2) + + callbackEvent1 := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(callbackID1), + cadence.NewUInt64(1000), // execution effort + }, + ).WithType(eventType) + + callbackEvent2 := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(callbackID2), + cadence.NewUInt64(2000), // execution effort + }, + ).WithType(eventType) + + testScheduledCallback(t, []cadence.Event{callbackEvent1, callbackEvent2}, 4) // process callback + 2 callbacks + system chunk + }) +} + +func testScheduledCallback(t *testing.T, callbackEvents []cadence.Event, expectedTransactionCount int) { + rag := &RandomAddressGenerator{} + + executorID := unittest.IdentifierFixture() + + execCtx := fvm.NewContext( + fvm.WithScheduleCallbacksEnabled(true), // Enable callbacks + fvm.WithChain(flow.Localnet.Chain()), + ) + + // track which transactions were executed and their details + executedTransactions := make(map[string]string) + var executedTransactionsMutex sync.Mutex + + // encode events to create flow event payloads + eventPayloads := make([][]byte, len(callbackEvents)) + callbackIDs := make([]uint64, len(callbackEvents)) + for i, event := range callbackEvents { + payload, err := ccf.Encode(event) + require.NoError(t, err) + eventPayloads[i] = payload + + // extract callback ID from event for later comparison + if len(callbackEvents) > 0 { + decodedEvent, err := ccf.Decode(nil, payload) + require.NoError(t, err) + if cadenceEvent, ok := decodedEvent.(cadence.Event); ok { + // search for the ID field in the event + idField := cadence.SearchFieldByName(cadenceEvent, "ID") + if idValue, ok := idField.(cadence.UInt64); ok { + callbackIDs[i] = uint64(idValue) + } + } + } + } + + // create a VM that will track execution and return appropriate events + vm := &callbackTestVM{ + testVM: testVM{ + t: t, + eventsPerTransaction: 0, // we'll handle events manually + }, + executedTransactions: executedTransactions, + executedMutex: &executedTransactionsMutex, + eventPayloads: eventPayloads, + callbackIDs: callbackIDs, + } + + committer := &fakeCommitter{ + callCount: 0, + } + + me := new(modulemock.Local) + me.On("NodeID").Return(executorID) + me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). + Return(nil, nil) + + exemetrics := new(modulemock.ExecutionMetrics) + exemetrics.On("ExecutionBlockExecuted", + mock.Anything, + mock.Anything). + Return(nil). + Times(1) + + // expect 1 system collection execution + exemetrics.On("ExecutionCollectionExecuted", + mock.Anything, + mock.Anything). + Return(nil). 
+ Times(1) + + // expect the specified number of transactions + exemetrics.On("ExecutionTransactionExecuted", + mock.Anything, + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return !arg.Failed && arg.SystemTransaction + }), + mock.Anything). + Return(nil). + Times(expectedTransactionCount) + + exemetrics.On( + "ExecutionChunkDataPackGenerated", + mock.Anything, + mock.Anything). + Return(nil). + Times(1) // system collection + + exemetrics.On( + "ExecutionBlockCachedPrograms", + mock.Anything). + Return(nil). + Times(1) + + bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) + trackerStorage := mocktracker.NewMockStorage() + + prov := provider.NewProvider( + zerolog.Nop(), + metrics.NewNoopCollector(), + execution_data.DefaultSerializer, + bservice, + trackerStorage, + ) + + exe, err := computer.NewBlockComputer( + vm, + execCtx, + exemetrics, + trace.NewNoopTracer(), + zerolog.Nop(), + committer, + me, + prov, + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) + require.NoError(t, err) + + // create empty block (no user collections) + block := generateBlock(0, 0, rag) + + parentBlockExecutionResultID := unittest.IdentifierFixture() + result, err := exe.ExecuteBlock( + context.Background(), + parentBlockExecutionResultID, + block, + nil, + derived.NewEmptyDerivedBlockData(0)) + require.NoError(t, err) + + // verify execution results + assert.Len(t, result.AllExecutionSnapshots(), 1) // Only system chunk + assert.Len(t, result.AllTransactionResults(), expectedTransactionCount) + + // verify correct number of commits (1 for system collection) + assert.Equal(t, 1, committer.callCount) + assert.Equal(t, expectedTransactionCount, len(executedTransactions)) + + // verify we executed each type of transaction + hasProcessCallback := false + hasSystemChunk := false + callbackNames := make(map[string]bool) + + for _, txType := range executedTransactions { + switch txType { + case "process_callback": + hasProcessCallback = true + case "system_chunk": + hasSystemChunk = true + default: + if strings.HasPrefix(txType, "callback") { + // add unique callbacks to the map + callbackNames[txType] = true + } + } + } + + assert.True(t, hasProcessCallback, "process callback transaction should have been executed") + assert.True(t, hasSystemChunk, "system chunk transaction should have been executed") + assert.Equal(t, len(callbackEvents), len(callbackNames), "should have executed the expected number of callback transactions") + + // verify no transaction errors + for _, txResult := range result.AllTransactionResults() { + assert.Empty(t, txResult.ErrorMessage, "transaction should not have failed") + } + + // verify receipt structure + receipt := result.ExecutionReceipt + assert.Equal(t, executorID, receipt.ExecutorID) + assert.Equal(t, parentBlockExecutionResultID, receipt.PreviousResultID) + assert.Equal(t, block.ID(), receipt.BlockID) + assert.Len(t, receipt.Chunks, 1) // Only system chunk + + // verify system chunk details + systemChunk := receipt.Chunks[0] + assert.Equal(t, block.ID(), systemChunk.BlockID) + assert.Equal(t, uint(0), systemChunk.CollectionIndex) // System collection is at index 0 for empty block + assert.Equal(t, uint64(expectedTransactionCount), systemChunk.NumberOfTransactions) + + // verify all mocks were called as expected + exemetrics.AssertExpectations(t) +} + +// callbackTestVM is a custom VM for testing callback execution +type callbackTestVM struct { + testVM // Embed 
testVM + executedTransactions map[string]string + executedMutex *sync.Mutex + eventPayloads [][]byte + callbackIDs []uint64 +} + +func (vm *callbackTestVM) NewExecutor( + ctx fvm.Context, + proc fvm.Procedure, + txnState storage.TransactionPreparer, +) fvm.ProcedureExecutor { + // Create a custom executor that tracks execution and returns proper events + return &callbackTestExecutor{ + testExecutor: testExecutor{ + testVM: &vm.testVM, + ctx: ctx, + proc: proc, + txnState: txnState, + }, + vm: vm, + } +} + +// callbackTestExecutor is a custom executor for testing callback execution +type callbackTestExecutor struct { + testExecutor + vm *callbackTestVM +} + +// we need to reimplement this Output since the events are consumed in the block computer +// from the output of the procedure executor +func (executor *callbackTestExecutor) Output() fvm.ProcedureOutput { + executor.vm.executedMutex.Lock() + defer executor.vm.executedMutex.Unlock() + + txProc, ok := executor.proc.(*fvm.TransactionProcedure) + if !ok { + return fvm.ProcedureOutput{} + } + + txBody := txProc.Transaction + txID := fmt.Sprintf("tx_%d", txProc.TxIndex) + + switch { + // scheduled callbacks process transaction + case strings.Contains(string(txBody.Script), "scheduler.process"): + executor.vm.executedTransactions[txID] = "process_callback" + env := systemcontracts.SystemContractsForChain(flow.Mainnet.Chain().ChainID()).AsTemplateEnv() + eventTypeString := fmt.Sprintf("A.%v.CallbackScheduler.CallbackProcessed", env.FlowCallbackSchedulerAddress) + + // return events for each scheduled callback + events := make([]flow.Event, len(executor.vm.eventPayloads)) + for i, payload := range executor.vm.eventPayloads { + events[i] = flow.Event{ + // TODO: we shouldn't hardcode this event types, refactor after the scheduler contract is done + Type: flow.EventType(eventTypeString), + TransactionID: txProc.ID, + + TransactionIndex: txProc.TxIndex, + EventIndex: uint32(i), + Payload: payload, + } + } + + return fvm.ProcedureOutput{ + Events: events, + } + // scheduled callbacks execute transaction + case strings.Contains(string(txBody.Script), "scheduler.executeCallback"): + // extract the callback ID from the arguments + if len(txBody.Arguments) == 0 { + return fvm.ProcedureOutput{} + } + + // decode the argument to check which callback it is + argValue, err := ccf.Decode(nil, txBody.Arguments[0]) + if err == nil { + if idValue, ok := argValue.(cadence.UInt64); ok { + // find which callback this is + callbackIndex := -1 + for i, callbackID := range executor.vm.callbackIDs { + if uint64(idValue) == callbackID { + callbackIndex = i + break + } + } + + if callbackIndex >= 0 { + executor.vm.executedTransactions[txID] = fmt.Sprintf("callback%d", callbackIndex+1) + } else { + executor.vm.executedTransactions[txID] = "unknown_callback" + } + } + } + + return fvm.ProcedureOutput{} + // system chunk transaction + default: + executor.vm.executedTransactions[txID] = "system_chunk" + return fvm.ProcedureOutput{} + } +} + func generateBlock( collectionCount, transactionCount int, addressGenerator flow.AddressGenerator, diff --git a/engine/execution/computation/computer/result_collector.go b/engine/execution/computation/computer/result_collector.go index d22b1499bd0..0d180cdc40f 100644 --- a/engine/execution/computation/computer/result_collector.go +++ b/engine/execution/computation/computer/result_collector.go @@ -98,7 +98,7 @@ func newResultCollector( receiptHasher hash.Hasher, parentBlockExecutionResultID flow.Identifier, block *entity.ExecutableBlock, 
- numTransactions int, + inputChannelSize int, consumers []result.ExecutedCollectionConsumer, previousBlockSnapshot snapshot.StorageSnapshot, versionAwareChunkConstructor flow.ChunkConstructor, @@ -109,7 +109,7 @@ func newResultCollector( tracer: tracer, blockSpan: blockSpan, metrics: metrics, - processorInputChan: make(chan transactionResult, numTransactions), + processorInputChan: make(chan transactionResult, inputChannelSize), processorDoneChan: make(chan struct{}), committer: committer, signer: signer, diff --git a/engine/execution/ingestion/block_queue/queue.go b/engine/execution/ingestion/block_queue/queue.go index 02dfea97cd3..abb7cd119b8 100644 --- a/engine/execution/ingestion/block_queue/queue.go +++ b/engine/execution/ingestion/block_queue/queue.go @@ -48,12 +48,51 @@ type BlockQueue struct { blockIDsByHeight map[uint64]map[flow.Identifier]*entity.ExecutableBlock } +// MissingCollection stores a collection guarantee for which an Execution Node has not +// yet received the full collection. It is used for book-keeping while requesting collections. +// +//structwrite:immutable - mutations allowed only within the constructor type MissingCollection struct { BlockID flow.Identifier Height uint64 Guarantee *flow.CollectionGuarantee } +// UntrustedMissingCollection is an untrusted input-only representation of a MissingCollection, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedMissingCollection should be validated and converted into +// a trusted MissingCollection using the NewMissingCollection constructor. +type UntrustedMissingCollection MissingCollection + +// NewMissingCollection creates a new instance of MissingCollection. +// Construction of a MissingCollection is allowed only within the constructor. +// +// All errors indicate a valid MissingCollection cannot be constructed from the input. +func NewMissingCollection(untrusted UntrustedMissingCollection) (*MissingCollection, error) { + if untrusted.BlockID == flow.ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if untrusted.Height == 0 { + return nil, fmt.Errorf("Height must not be zero") + } + + if untrusted.Guarantee == nil { + return nil, fmt.Errorf("CollectionGuarantee must not be empty") + } + + return &MissingCollection{ + BlockID: untrusted.BlockID, + Height: untrusted.Height, + Guarantee: untrusted.Guarantee, + }, nil +} + func (m *MissingCollection) ID() flow.Identifier { return m.Guarantee.ID() } @@ -178,7 +217,19 @@ func (q *BlockQueue) HandleBlock(block *flow.Block, parentFinalState *flow.State }, } - missingCollections = append(missingCollections, missingCollectionForBlock(executable, guarantee)) + missingCollection, err := NewMissingCollection(UntrustedMissingCollection{ + BlockID: executable.ID(), + Height: executable.Block.Header.Height, + Guarantee: col.Guarantee, + }) + if err != nil { + return nil, nil, fmt.Errorf("could not construct missingCollection: %w", err) + } + + missingCollections = append( + missingCollections, + missingCollection, + ) } } @@ -415,7 +466,7 @@ func (q *BlockQueue) checkIfChildBlockBecomeExecutable( // GetMissingCollections returns the missing collections and the start state for the given block // Useful for debugging what is missing for the next unexecuted block to become executable.
-// It returns an error if the block is not found +// It returns an error if the block is not found or if a missing collection could not be constructed. func (q *BlockQueue) GetMissingCollections(blockID flow.Identifier) ( []*MissingCollection, *flow.StateCommitment, @@ -434,16 +485,21 @@ func (q *BlockQueue) GetMissingCollections(blockID flow.Identifier) ( if col.IsCompleted() { continue } - missingCollections = append(missingCollections, missingCollectionForBlock(block, col.Guarantee)) - } - return missingCollections, block.StartState, nil -} + missingCollection, err := NewMissingCollection(UntrustedMissingCollection{ + BlockID: block.ID(), + Height: block.Block.Header.Height, + Guarantee: col.Guarantee, + }) + if err != nil { + return nil, nil, fmt.Errorf("could not construct missingCollection: %w", err) + } -func missingCollectionForBlock(block *entity.ExecutableBlock, guarantee *flow.CollectionGuarantee) *MissingCollection { - return &MissingCollection{ - BlockID: block.ID(), - Height: block.Block.Header.Height, - Guarantee: guarantee, + missingCollections = append( + missingCollections, + missingCollection, + ) } + + return missingCollections, block.StartState, nil } diff --git a/engine/execution/ingestion/block_queue/queue_test.go b/engine/execution/ingestion/block_queue/queue_test.go index e54be3eb574..e99ded21b0f 100644 --- a/engine/execution/ingestion/block_queue/queue_test.go +++ b/engine/execution/ingestion/block_queue/queue_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -406,6 +407,81 @@ func TestOnBlockWithMissingParentCommit(t *testing.T) { requireQueueIsEmpty(t, q) } +// TestNewMissingCollection verifies that NewMissingCollection constructs a valid MissingCollection +// when given complete, non-zero fields, and returns an error if any required field is missing.
+// It covers: +// - valid missing collection creation +// - missing BlockID +// - zero Height +// - nil Guarantee +func TestNewMissingCollection(t *testing.T) { + height := uint64(10) + + t.Run("valid missing collection", func(t *testing.T) { + id := unittest.IdentifierFixture() + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: height, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.NoError(t, err) + assert.NotNil(t, mc) + assert.Equal(t, id, mc.BlockID) + assert.Equal(t, height, mc.Height) + assert.Equal(t, guarantee, mc.Guarantee) + }) + + t.Run("missing BlockID", func(t *testing.T) { + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: flow.ZeroID, + Height: height, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("zero Height", func(t *testing.T) { + id := unittest.IdentifierFixture() + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: 0, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "Height") + }) + + t.Run("nil Guarantee", func(t *testing.T) { + id := unittest.IdentifierFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: height, + Guarantee: nil, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "CollectionGuarantee") + }) +} + /* ==== Test utils ==== */ // GetBlock("A") => A diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go index 62889ffc479..fb16fd12841 100644 --- a/engine/execution/ingestion/core.go +++ b/engine/execution/ingestion/core.go @@ -450,7 +450,7 @@ func storeCollectionIfMissing(collections storage.Collections, col *flow.Collect return fmt.Errorf("failed to get collection %v: %w", col.ID(), err) } - err := collections.Store(col) + _, err = collections.Store(col) if err != nil { return fmt.Errorf("failed to store collection %v: %w", col.ID(), err) } diff --git a/engine/execution/ingestion/core_test.go b/engine/execution/ingestion/core_test.go index 9fc2c824fa8..a854185aca0 100644 --- a/engine/execution/ingestion/core_test.go +++ b/engine/execution/ingestion/core_test.go @@ -167,7 +167,8 @@ func verifyBlockNotExecuted(t *testing.T, consumer *mockConsumer, blocks ...*flo func storeCollection(t *testing.T, collectionDB *mocks.MockCollectionStore, collection *flow.Collection) { log.Info().Msgf("collectionDB: store collection %v", collection.ID()) - require.NoError(t, collectionDB.Store(collection)) + _, err := collectionDB.Store(collection) + require.NoError(t, err) } func receiveCollection(t *testing.T, fetcher *mockFetcher, core *Core, collection *flow.Collection) { diff --git a/engine/execution/ingestion/mocks/collection_store.go b/engine/execution/ingestion/mocks/collection_store.go index 31e0c229a8b..fe8ef2a856e 100644 --- a/engine/execution/ingestion/mocks/collection_store.go +++ b/engine/execution/ingestion/mocks/collection_store.go @@ -3,10 +3,14 @@ package mocks import ( "fmt" + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) +var _ storage.Collections = (*MockCollectionStore)(nil) + type MockCollectionStore struct { byID 
map[flow.Identifier]*flow.Collection } @@ -25,9 +29,9 @@ func (m *MockCollectionStore) ByID(id flow.Identifier) (*flow.Collection, error) return c, nil } -func (m *MockCollectionStore) Store(c *flow.Collection) error { +func (m *MockCollectionStore) Store(c *flow.Collection) (flow.LightCollection, error) { m.byID[c.ID()] = c - return nil + return c.Light(), nil } func (m *MockCollectionStore) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { @@ -50,3 +54,15 @@ func (m *MockCollectionStore) LightByID(id flow.Identifier) (*flow.LightCollecti func (m *MockCollectionStore) LightByTransactionID(id flow.Identifier) (*flow.LightCollection, error) { panic("LightByTransactionID not implemented") } + +func (m *MockCollectionStore) BatchStoreLightAndIndexByTransaction(_ *flow.LightCollection, _ storage.ReaderBatchWriter) error { + panic("BatchStoreLightAndIndexByTransaction not implemented") +} + +func (m *MockCollectionStore) StoreAndIndexByTransaction(_ lockctx.Proof, collection *flow.Collection) (flow.LightCollection, error) { + panic("StoreAndIndexByTransaction not implemented") +} + +func (m *MockCollectionStore) BatchStoreAndIndexByTransaction(_ lockctx.Proof, collection *flow.Collection, batch storage.ReaderBatchWriter) (flow.LightCollection, error) { + panic("BatchStoreAndIndexByTransaction not implemented") +} diff --git a/engine/execution/messages.go b/engine/execution/messages.go index bae74a71027..6d27952129b 100644 --- a/engine/execution/messages.go +++ b/engine/execution/messages.go @@ -12,6 +12,8 @@ type ComputationResult struct { *flow.ExecutionReceipt } +// NewEmptyComputationResult creates an empty ComputationResult. +// Construction ComputationResult allowed only within the constructor. func NewEmptyComputationResult( block *entity.ExecutableBlock, versionAwareChunkConstructor flow.ChunkConstructor, diff --git a/engine/execution/migration/badgerpebble.go b/engine/execution/migration/badgerpebble.go deleted file mode 100644 index 068c387f87a..00000000000 --- a/engine/execution/migration/badgerpebble.go +++ /dev/null @@ -1,201 +0,0 @@ -package migration - -import ( - "errors" - "fmt" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine/execution/state/bootstrap" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/block_iterator/latest" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/operation" - "github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" -) - -var ( - mainnet26SporkID flow.Identifier - testnet52SporkID flow.Identifier -) - -func init() { - var err error - - mainnet26SporkID, err = flow.HexStringToIdentifier("45894bde3f45dbfd89cab12be84e9172385da5079d40bf63979ca8a6a7ede741") - if err != nil { - panic(fmt.Sprintf("failed to parse Mainnet26SporkID: %v", err)) - } - - testnet52SporkID, err = flow.HexStringToIdentifier("5b88b81cfce2619305213489c2137f98e6efa0ec333dab2c31042b743388a3ce") - if err != nil { - panic(fmt.Sprintf("failed to parse Testnet52SporkID: %v", err)) - } -} - -// MigrateLastSealedExecutedResultToPebble run the migration to the pebble database, so that -// it has necessary data to be able execute the next block. -// the migration includes the following operations: -// 1. bootstrap the pebble database -// 2. 
copy execution data of the last sealed and executed block from badger to pebble. -// the execution data includes the execution result and statecommitment, which is the minimum data needed from the database -// to be able to continue executing the next block -func MigrateLastSealedExecutedResultToPebble(logger zerolog.Logger, badgerDB *badger.DB, pebbleDB *pebble.DB, state protocol.State, rootSeal *flow.Seal) error { - // only run the migration for mainnet26 and testnet52 - sporkID := state.Params().SporkID() - chainID := state.Params().ChainID() - if chainID == flow.Mainnet || chainID == flow.Testnet { - if sporkID != mainnet26SporkID && sporkID != testnet52SporkID { - logger.Warn().Msgf("spork ID %v is not Mainnet26SporkID %v or Testnet52SporkID %v, skip migration", - sporkID, mainnet26SporkID, testnet52SporkID) - return nil - } - } - - bdb := badgerimpl.ToDB(badgerDB) - pdb := pebbleimpl.ToDB(pebbleDB) - lg := logger.With().Str("module", "badger-pebble-migration").Logger() - - // bootstrap pebble database - bootstrapper := bootstrap.NewBootstrapper(logger) - commit, bootstrapped, err := bootstrapper.IsBootstrapped(pdb) - if err != nil { - return fmt.Errorf("could not query database to know whether database has been bootstrapped: %w", err) - } - - if !bootstrapped { - err = bootstrapper.BootstrapExecutionDatabase(pdb, rootSeal) - if err != nil { - return fmt.Errorf("could not bootstrap pebble execution database: %w", err) - } - } - - // get last sealed and executed block in badger - lastExecutedSealedHeightInBadger, err := latest.LatestSealedAndExecutedHeight(state, bdb) - if err != nil { - return fmt.Errorf("failed to get last executed sealed block: %w", err) - } - - // read all the data and save to pebble - header, err := state.AtHeight(lastExecutedSealedHeightInBadger).Head() - if err != nil { - return fmt.Errorf("failed to get block at height %d: %w", lastExecutedSealedHeightInBadger, err) - } - - blockID := header.ID() - - lg.Info().Msgf( - "migrating last executed and sealed block %v (%v) from badger to pebble", - header.Height, blockID) - - // create badger storage modules - badgerResults, badgerCommits := createStores(bdb) - // read data from badger - result, commit, err := readResultsForBlock(blockID, badgerResults, badgerCommits) - - if err != nil { - return fmt.Errorf("failed to read data from badger: %w", err) - } - - // create pebble storage modules - pebbleResults, pebbleCommits := createStores(pdb) - - var existingExecuted flow.Identifier - err = operation.RetrieveExecutedBlock(pdb.Reader(), &existingExecuted) - if err == nil { - // there is an executed block in pebble, compare if it's newer than the badger one, - // if newer, it means EN is storing new results in pebble, in this case, we don't - // want to update the executed block with the badger one. - - header, err := state.AtBlockID(existingExecuted).Head() - if err != nil { - return fmt.Errorf("failed to get block at height %d from badger: %w", lastExecutedSealedHeightInBadger, err) - } - - if header.Height > lastExecutedSealedHeightInBadger { - // existing executed in pebble is higher than badger, no need to store anything - // why? - // because the migration only copy the last sealed and executed block from badger to pebble, - // if EN is still storing new results in badger, then the existingExecuted in pebble will be the same as - // badger not higher. 
- // if EN is storing new results in pebble, then the existingExecuted in pebble will be higher than badger, - // in this case, we don't need to update the executed block in pebble. - lg.Info().Msgf("existing executed block %v in pebble is newer than %v in badger, skip update", - header.Height, lastExecutedSealedHeightInBadger) - return nil - } - - // otherwise continue to update last executed block in pebble - lg.Info().Msgf("existing executed block %v in pebble is older than %v in badger, update executed block", - header.Height, lastExecutedSealedHeightInBadger, - ) - } else if !errors.Is(err, storage.ErrNotFound) { - // exception - return fmt.Errorf("failed to retrieve executed block from pebble: %w", err) - } - - // store data to pebble in a batch update - err = pdb.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { - if err := pebbleResults.BatchStore(result, batch); err != nil { - return fmt.Errorf("failed to store receipt for block %s: %w", blockID, err) - } - - if err := pebbleResults.BatchIndex(blockID, result.ID(), batch); err != nil { - return fmt.Errorf("failed to index result for block %s: %w", blockID, err) - } - - if err := pebbleCommits.BatchStore(blockID, commit, batch); err != nil { - return fmt.Errorf("failed to store commit for block %s: %w", blockID, err) - } - - // two cases here: - // 1. no executed block in pebble - // in this case: set this block as last executed block - // 2. badger has newer executed block than pebble - // in this case: set this block as last executed block - if err := operation.UpdateExecutedBlock(batch.Writer(), blockID); err != nil { - return fmt.Errorf("failed to update executed block in pebble: %w", err) - } - - return nil - }) - - if err != nil { - return fmt.Errorf("failed to write data to pebble: %w", err) - } - - lg.Info().Msgf("migrated last executed and sealed block %v (%v) from badger to pebble", - header.Height, blockID) - - return nil -} - -func readResultsForBlock( - blockID flow.Identifier, - resultsStore storage.ExecutionResults, - commitsStore storage.Commits, -) (*flow.ExecutionResult, flow.StateCommitment, error) { - result, err := resultsStore.ByBlockID(blockID) - if err != nil { - return nil, flow.DummyStateCommitment, fmt.Errorf("failed to get receipt for block %s: %w", blockID, err) - } - - commit, err := commitsStore.ByBlockID(blockID) - if err != nil { - return nil, flow.DummyStateCommitment, fmt.Errorf("failed to get commit for block %s: %w", blockID, err) - } - return result, commit, nil -} - -func createStores(db storage.DB) (storage.ExecutionResults, storage.Commits) { - noop := metrics.NewNoopCollector() - results := store.NewExecutionResults(noop, db) - commits := store.NewCommits(noop, db) - return results, commits -} diff --git a/engine/execution/migration/badgerpebble_test.go b/engine/execution/migration/badgerpebble_test.go deleted file mode 100644 index 2ce47f62a14..00000000000 --- a/engine/execution/migration/badgerpebble_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package migration - -import ( - "context" - "fmt" - "testing" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/execution/state" - "github.com/onflow/flow-go/engine/execution/state/bootstrap" - "github.com/onflow/flow-go/engine/execution/testutil" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/trace" - 
"github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/invalid" - protocolmock "github.com/onflow/flow-go/state/protocol/mock" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestMigrateLastSealedExecutedResultToPebble(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - // bootstrap to init highest executed height - bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) - genesis := unittest.BlockHeaderFixture() - rootSeal := unittest.Seal.Fixture() - unittest.Seal.WithBlock(genesis)(rootSeal) - - db := badgerimpl.ToDB(bdb) - err := bootstrapper.BootstrapExecutionDatabase(db, rootSeal) - require.NoError(t, err) - - // create all modules - metrics := &metrics.NoopCollector{} - - headers := bstorage.NewHeaders(metrics, bdb) - txResults := store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) - commits := store.NewCommits(metrics, db) - results := store.NewExecutionResults(metrics, db) - receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) - events := store.NewEvents(metrics, db) - serviceEvents := store.NewServiceEvents(metrics, db) - transactions := bstorage.NewTransactions(metrics, bdb) - collections := bstorage.NewCollections(bdb, transactions) - chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, bstorage.DefaultCacheSize) - - err = headers.Store(genesis) - require.NoError(t, err) - - getLatestFinalized := func() (uint64, error) { - return genesis.Height, nil - } - - // create execution state module - es := state.NewExecutionState( - nil, - commits, - nil, - headers, - chunkDataPacks, - results, - myReceipts, - events, - serviceEvents, - txResults, - db, - getLatestFinalized, - trace.NewNoopTracer(), - nil, - false, - ) - require.NotNil(t, es) - - executableBlock := unittest.ExecutableBlockFixtureWithParent( - nil, - genesis, - &unittest.GenesisStateCommitment) - header := executableBlock.Block.Header - - computationResult := testutil.ComputationResultFixture(t) - computationResult.ExecutableBlock = executableBlock - computationResult.ExecutionReceipt.ExecutionResult.BlockID = header.ID() - - commit := computationResult.CurrentEndState() - newexecutableBlock := unittest.ExecutableBlockFixtureWithParent( - nil, - header, - &commit) - newheader := newexecutableBlock.Block.Header - - err = headers.Store(header) - require.NoError(t, err) - - // save execution results - err = es.SaveExecutionResults(context.Background(), computationResult) - require.NoError(t, err) - - // read the saved results before migration - badgerResults, badgerCommits := createStores(badgerimpl.ToDB(bdb)) - bresult, bcommit, err := readResultsForBlock( - header.ID(), badgerResults, badgerCommits) - require.NoError(t, err) - - // mock that the executed block is the last executed and sealed block - ps := new(protocolmock.State) - params := new(protocolmock.Params) - params.On("SporkID").Return(mainnet26SporkID) - params.On("ChainID").Return(flow.Mainnet) - ps.On("Params").Return(params) - ps.On("AtHeight", mock.Anything).Return( - func(height uint64) protocol.Snapshot { - if height == header.Height { - return createSnapshot(header) - } else if height == newheader.Height 
{ - return createSnapshot(newheader) - } - return invalid.NewSnapshot(fmt.Errorf("invalid height: %v", height)) - }) - ps.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) protocol.Snapshot { - if blockID == header.ID() { - return createSnapshot(header) - } else if blockID == genesis.ID() { - return createSnapshot(genesis) - } else if blockID == newheader.ID() { - return createSnapshot(newheader) - } - return invalid.NewSnapshot(fmt.Errorf("invalid block: %v", blockID)) - }) - - sealed := header - - ps.On("Sealed", mock.Anything).Return(func() protocol.Snapshot { - return createSnapshot(sealed) - }) - - // run the migration - require.NoError(t, MigrateLastSealedExecutedResultToPebble(unittest.Logger(), bdb, pdb, ps, rootSeal)) - - // read the migrated results after migration - pebbleResults, pebbleCommits := createStores(pebbleimpl.ToDB(pdb)) - presult, pcommit, err := readResultsForBlock( - header.ID(), pebbleResults, pebbleCommits) - require.NoError(t, err) - - // compare the migrated results - require.Equal(t, bresult, presult) - require.Equal(t, bcommit, pcommit) - - // store a new block in pebble now, simulating new block executed after migration - pbdb := pebbleimpl.ToDB(pdb) - txResults = store.NewTransactionResults(metrics, pbdb, bstorage.DefaultCacheSize) - commits = store.NewCommits(metrics, pbdb) - results = store.NewExecutionResults(metrics, pbdb) - receipts = store.NewExecutionReceipts(metrics, pbdb, results, bstorage.DefaultCacheSize) - myReceipts = store.NewMyExecutionReceipts(metrics, pbdb, receipts) - events = store.NewEvents(metrics, pbdb) - serviceEvents = store.NewServiceEvents(metrics, pbdb) - - // create execution state module - newes := state.NewExecutionState( - nil, - commits, - nil, - headers, - chunkDataPacks, - results, - myReceipts, - events, - serviceEvents, - txResults, - db, - getLatestFinalized, - trace.NewNoopTracer(), - nil, - false, - ) - require.NotNil(t, es) - - err = headers.Store(newheader) - require.NoError(t, err) - - newcomputationResult := testutil.ComputationResultFixture(t) - newcomputationResult.ExecutableBlock = newexecutableBlock - newcomputationResult.ExecutionReceipt.ExecutionResult.BlockID = newheader.ID() - sealed = newheader - - // save execution results - err = newes.SaveExecutionResults(context.Background(), newcomputationResult) - require.NoError(t, err) - - bresult, bcommit, err = readResultsForBlock( - newheader.ID(), badgerResults, badgerCommits) - require.NoError(t, err) - - // run the migration - require.NoError(t, MigrateLastSealedExecutedResultToPebble(unittest.Logger(), bdb, pdb, ps, rootSeal)) - - // read the migrated results after migration - presult, pcommit, err = readResultsForBlock( - newheader.ID(), pebbleResults, pebbleCommits) - require.NoError(t, err) - - // compare the migrated results - require.Equal(t, bresult, presult) - require.Equal(t, bcommit, pcommit) - }) -} - -func createSnapshot(head *flow.Header) protocol.Snapshot { - snapshot := &protocolmock.Snapshot{} - snapshot.On("Head").Return( - func() *flow.Header { - return head - }, - nil, - ) - return snapshot -} diff --git a/engine/execution/pruner/core.go b/engine/execution/pruner/core.go index ac8d6e1faff..7040d80d1d1 100644 --- a/engine/execution/pruner/core.go +++ b/engine/execution/pruner/core.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/onflow/flow-go/module" diff --git a/engine/execution/pruner/core_test.go 
b/engine/execution/pruner/core_test.go index d3c245ebd31..4f6139aa556 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" @@ -14,8 +14,7 @@ import ( "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/storage/store" @@ -26,19 +25,23 @@ import ( func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() // create dependencies ps := unittestMocks.NewProtocolState() blocks, rootResult, rootSeal := unittest.ChainFixture(0) genesis := blocks[0] require.NoError(t, ps.Bootstrap(genesis, rootResult, rootSeal)) + db := badgerimpl.ToDB(bdb) ctx, cancel := context.WithCancel(context.Background()) metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, bdb) - results := badgerstorage.NewExecutionResults(metrics, bdb) + all := store.InitAll(metrics, db) + headers := all.Headers + blockstore := all.Blocks + results := all.Results - transactions := badgerstorage.NewTransactions(metrics, bdb) - collections := badgerstorage.NewCollections(bdb, transactions) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, 1000) lastSealedHeight := 30 @@ -46,19 +49,36 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { // indexed by height chunks := make([]*verification.VerifiableChunkData, lastFinalizedHeight+2) parentID := genesis.ID() - require.NoError(t, headers.Store(genesis.Header)) + lctxGenesis := lockManager.NewContext() + require.NoError(t, lctxGenesis.AcquireLock(storage.LockInsertBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blockstore.BatchStore(lctxGenesis, rw, genesis) + })) + lctxGenesis.Release() + for i := 1; i <= lastFinalizedHeight; i++ { chunk, block := unittest.VerifiableChunkDataFixture(0, func(header *flow.Header) { header.Height = uint64(i) header.ParentID = parentID }) chunks[i] = chunk // index by height - require.NoError(t, headers.Store(chunk.Header)) - require.NoError(t, bdb.Update(operation.IndexBlockHeight(chunk.Header.Height, chunk.Header.ID()))) + lctxBlock := lockManager.NewContext() + require.NoError(t, lctxBlock.AcquireLock(storage.LockInsertBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blockstore.BatchStore(lctxBlock, rw, block) + })) + lctxBlock.Release() + lctxFinality := lockManager.NewContext() + require.NoError(t, lctxFinality.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctxFinality, rw, chunk.Header.Height, chunk.Header.ID()) + })) + lctxFinality.Release() require.NoError(t, 
results.Store(chunk.Result)) require.NoError(t, results.Index(chunk.Result.BlockID, chunk.Result.ID())) require.NoError(t, chunkDataPacks.Store([]*flow.ChunkDataPack{chunk.ChunkDataPack})) - require.NoError(t, collections.Store(chunk.ChunkDataPack.Collection)) + _, storeErr := collections.Store(chunk.ChunkDataPack.Collection) + require.NoError(t, storeErr) // verify that chunk data pack fixture can be found by the result for _, c := range chunk.Result.Chunks { chunkID := c.ID() @@ -76,8 +96,11 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { parentID = block.ID() } - // last seale and executed is the last sealed - require.NoError(t, bdb.Update(operation.InsertExecutedBlock(chunks[lastFinalizedHeight].Header.ID()))) + // update the index "latest executed block (max height)" to latest sealed block + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateExecutedBlock(rw.Writer(), chunks[lastFinalizedHeight].Header.ID()) + })) + lastSealed := chunks[lastSealedHeight].Header require.NoError(t, ps.MakeSeal(lastSealed.ID())) diff --git a/engine/execution/pruner/engine.go b/engine/execution/pruner/engine.go index f315dcc745e..c1d4e1a378a 100644 --- a/engine/execution/pruner/engine.go +++ b/engine/execution/pruner/engine.go @@ -1,7 +1,7 @@ package pruner import ( - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/onflow/flow-go/module" diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index 59fce03aaf8..19305dc257d 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -5,7 +5,8 @@ import ( "errors" "fmt" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine/execution/state" @@ -92,12 +93,20 @@ func (b *Bootstrapper) IsBootstrapped(db storage.DB) (flow.StateCommitment, bool } func (b *Bootstrapper) BootstrapExecutionDatabase( + manager lockctx.Manager, db storage.DB, rootSeal *flow.Seal, ) error { + lctx := manager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOwnReceipt) + if err != nil { + return err + } + commit := rootSeal.FinalState - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { w := rw.Writer() err := operation.UpdateExecutedBlock(w, rootSeal.BlockID) if err != nil { @@ -109,12 +118,12 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( return fmt.Errorf("could not index result for root result: %w", err) } - err = operation.IndexStateCommitment(w, flow.ZeroID, commit) + err = operation.IndexStateCommitment(lctx, rw, flow.ZeroID, commit) if err != nil { return fmt.Errorf("could not index void state commitment: %w", err) } - err = operation.IndexStateCommitment(w, rootSeal.BlockID, commit) + err = operation.IndexStateCommitment(lctx, rw, rootSeal.BlockID, commit) if err != nil { return fmt.Errorf("could not index genesis state commitment: %w", err) } @@ -127,12 +136,6 @@ func (b *Bootstrapper) BootstrapExecutionDatabase( return nil }) - - if err != nil { - return err - } - - return nil } func ImportRegistersFromCheckpoint(logger zerolog.Logger, checkpointFile string, checkpointHeight uint64, checkpointRootHash ledger.RootHash, pdb *pebble.DB, workerCount int) error { diff --git 
a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 6d1fa7b0b3f..62aefc9a6dc 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -57,7 +57,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("67a94534546ce93fef87ee404849b6601620c9c8db1cd925defe93460ddf212c") + expectedStateCommitmentBytes, _ := hex.DecodeString("a278821691c3c112d263a3e4e13ff9c37c5b1ebb4d17510ed1c689afe6fa6241") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) @@ -104,7 +104,7 @@ func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { // - transaction fee deduction // This tests that the state commitment has not changed for the bookkeeping parts of the transaction. func TestBootstrapLedger_EmptyTransaction(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("df1937164702f557d7457f8fdd6040a81dfbcbdbfb93ec187c3bf1cac414f5c0") + expectedStateCommitmentBytes, _ := hex.DecodeString("f4e318531ad48464e15dcd0fac520faedd9b31c7de2e326c4c944d2c9d5e56f0") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go index f2e3cdb2fbc..616646f75e6 100644 --- a/engine/execution/state/state.go +++ b/engine/execution/state/state.go @@ -7,6 +7,8 @@ import ( "math" "sync" + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm/storage/snapshot" @@ -54,6 +56,10 @@ type ScriptExecutionState interface { IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) } +// IsParentExecuted returns true if and only if the parent of the given block (header) is executed. +// TODO: The check of whether `header` is a root block is potentially flawed, because it only works for the genesis block. +// +// Neither spork root blocks nor dynamically bootstrapped Execution Nodes (with truncated history) are supported.
func IsParentExecuted(state ReadOnlyExecutionState, header *flow.Header) (bool, error) { // sanity check, caller should not pass a root block if header.Height == 0 { @@ -101,6 +107,7 @@ type state struct { transactionResults storage.TransactionResults db storage.DB getLatestFinalized func() (uint64, error) + lockManager lockctx.Manager registerStore execution.RegisterStore // when it is true, registers are stored in both register store and ledger @@ -125,6 +132,7 @@ func NewExecutionState( tracer module.Tracer, registerStore execution.RegisterStore, enableRegisterStore bool, + lockManager lockctx.Manager, ) ExecutionState { return &state{ tracer: tracer, @@ -142,6 +150,7 @@ func NewExecutionState( getLatestFinalized: getLatestFinalized, registerStore: registerStore, enableRegisterStore: enableRegisterStore, + lockManager: lockManager, } } @@ -337,7 +346,7 @@ func (s *state) StateCommitmentByBlockID(blockID flow.Identifier) (flow.StateCom func (s *state) ChunkDataPackByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) { chunkDataPack, err := s.chunkDataPacks.ByChunkID(chunkID) if err != nil { - return nil, fmt.Errorf("could not retrieve stored chunk data pack: %w", err) + return nil, fmt.Errorf("could not retrieve chunk data pack: %w", err) } return chunkDataPack, nil @@ -399,13 +408,18 @@ func (s *state) saveExecutionResults( return fmt.Errorf("can not store multiple chunk data pack: %w", err) } - // Write Batch is BadgerDB feature designed for handling lots of writes - // in efficient and atomic manner, hence pushing all the updates we can - // as tightly as possible to let Badger manage it. - // Note, that it does not guarantee atomicity as transactions has size limit, - // but it's the closest thing to atomicity we could have - return s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + lctx := s.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertOwnReceipt) + if err != nil { + return err + } + // Save entire execution result (including all chunk data packs) within one batch to minimize + // the number of database interactions. This is a large batch of data, which might not be + // committed within a single operation (e.g. if using Badger DB as storage backend, which has + // a size limit for its transactions). 
+ return s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { batch.AddCallback(func(err error) { // Rollback if an error occurs during batch operations if err != nil { @@ -438,7 +452,7 @@ func (s *state) saveExecutionResults( executionResult := &result.ExecutionReceipt.ExecutionResult // saving my receipts will also save the execution result - err = s.myReceipts.BatchStoreMyReceipt(result.ExecutionReceipt, batch) + err = s.myReceipts.BatchStoreMyReceipt(lctx, result.ExecutionReceipt, batch) if err != nil { return fmt.Errorf("could not persist execution result: %w", err) } @@ -451,14 +465,13 @@ func (s *state) saveExecutionResults( // the state commitment is the last data item to be stored, so that // IsBlockExecuted can be implemented by checking whether state commitment exists // in the database - err = s.commits.BatchStore(blockID, result.CurrentEndState(), batch) + err = s.commits.BatchStore(lctx, blockID, result.CurrentEndState(), batch) if err != nil { return fmt.Errorf("cannot store state commitment: %w", err) } return nil }) - } func (s *state) UpdateLastExecutedBlock(ctx context.Context, executedID flow.Identifier) error { diff --git a/engine/execution/state/state_storehouse_test.go b/engine/execution/state/state_storehouse_test.go index 480d2bbfb29..332e454b723 100644 --- a/engine/execution/state/state_storehouse_test.go +++ b/engine/execution/state/state_storehouse_test.go @@ -28,16 +28,17 @@ import ( "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/pebble" "github.com/onflow/flow-go/utils/unittest" ) -func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, commits *storage.Commits, finalized *testutil.MockFinalizedReader)) func(*testing.T) { +func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storagemock.Headers, commits *storagemock.Commits, finalized *testutil.MockFinalizedReader)) func(*testing.T) { return func(t *testing.T) { + lockManager := storage.NewTestingLockManager() unittest.RunWithBadgerDB(t, func(badgerDB *badger.DB) { metricsCollector := &metrics.NoopCollector{} diskWal := &fixtures.NoopWAL{} @@ -50,22 +51,22 @@ func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledg <-compactor.Done() }() - stateCommitments := storage.NewCommits(t) - stateCommitments.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) - headers := storage.NewHeaders(t) - blocks := storage.NewBlocks(t) - events := storage.NewEvents(t) + stateCommitments := storagemock.NewCommits(t) + stateCommitments.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + headers := storagemock.NewHeaders(t) + blocks := storagemock.NewBlocks(t) + events := storagemock.NewEvents(t) events.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) - serviceEvents := storage.NewServiceEvents(t) + serviceEvents := storagemock.NewServiceEvents(t) serviceEvents.On("BatchStore", mock.Anything, mock.Anything, 
mock.Anything).Return(nil) - txResults := storage.NewTransactionResults(t) + txResults := storagemock.NewTransactionResults(t) txResults.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) - chunkDataPacks := storage.NewChunkDataPacks(t) + chunkDataPacks := storagemock.NewChunkDataPacks(t) chunkDataPacks.On("Store", mock.Anything).Return(nil) - results := storage.NewExecutionResults(t) + results := storagemock.NewExecutionResults(t) results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything).Return(nil) - myReceipts := storage.NewMyExecutionReceipts(t) - myReceipts.On("BatchStoreMyReceipt", mock.Anything, mock.Anything).Return(nil) + myReceipts := storagemock.NewMyExecutionReceipts(t) + myReceipts.On("BatchStoreMyReceipt", mock.Anything, mock.Anything, mock.Anything).Return(nil) withRegisterStore(t, func(t *testing.T, rs *storehouse.RegisterStore, @@ -78,13 +79,18 @@ func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledg rootID, err := finalized.FinalizedBlockIDAtHeight(10) require.NoError(t, err) - require.NoError(t, - badgerDB.Update(operation.InsertExecutedBlock(rootID)), - ) - metrics := metrics.NewNoopCollector() - headersDB := badgerstorage.NewHeaders(metrics, badgerDB) - require.NoError(t, headersDB.Store(finalizedHeaders[10])) + db := badgerimpl.ToDB(badgerDB) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateExecutedBlock(rw.Writer(), rootID) + })) + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + require.NoError(t, badgerimpl.ToDB(badgerDB).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, finalizedHeaders[10].ID(), finalizedHeaders[10]) + })) + lctx.Release() getLatestFinalized := func() (uint64, error) { return rootHeight, nil @@ -96,6 +102,7 @@ func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledg trace.NewNoopTracer(), rs, true, + lockManager, ) f(t, es, ls, headers, stateCommitments, finalized) @@ -127,7 +134,7 @@ func withRegisterStore(t *testing.T, fn func( func TestExecutionStateWithStorehouse(t *testing.T) { t.Run("commit write and read new state", prepareStorehouseTest(func( - t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits, finalized *testutil.MockFinalizedReader) { + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storagemock.Headers, stateCommitments *storagemock.Commits, finalized *testutil.MockFinalizedReader) { // block 11 is the block to be executed block11 := finalized.BlockAtHeight(11) diff --git a/engine/execution/state/state_test.go b/engine/execution/state/state_test.go index 3b89124f4be..69fb51b81df 100644 --- a/engine/execution/state/state_test.go +++ b/engine/execution/state/state_test.go @@ -25,6 +25,7 @@ import ( func prepareTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, commits *storage.Commits)) func(*testing.T) { return func(t *testing.T) { + lockManager := storageerr.NewTestingLockManager() unittest.RunWithBadgerDB(t, func(badgerDB *badger.DB) { metricsCollector := &metrics.NoopCollector{} diskWal := &fixtures.NoopWAL{} @@ -56,6 +57,7 @@ func prepareTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, ls, stateCommitments, blocks, headers, chunkDataPacks, results, myReceipts, events, serviceEvents, txResults, db, getLatestFinalized, 
trace.NewNoopTracer(), nil, false, + lockManager, ) f(t, es, ls, headers, stateCommitments) diff --git a/engine/testutil/mock/nodes.go b/engine/testutil/mock/nodes.go index dfca196f23b..d95d35d712e 100644 --- a/engine/testutil/mock/nodes.go +++ b/engine/testutil/mock/nodes.go @@ -8,6 +8,7 @@ import ( "time" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -58,6 +59,7 @@ type StateFixture struct { Storage *storage.All ProtocolEvents *events.Distributor State protocol.ParticipantState + LockManager lockctx.Manager } // GenericNode implements a generic in-process node for tests. @@ -72,6 +74,7 @@ type GenericNode struct { Tracer module.Tracer PublicDB *badger.DB SecretsDB *badger.DB + LockManager lockctx.Manager Headers storage.Headers Guarantees storage.Guarantees Seals storage.Seals diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 661f5a7b49c..5917af59824 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -103,7 +103,8 @@ import ( "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + storagebadger "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation/badgerimpl" storagepebble "github.com/onflow/flow-go/storage/pebble" "github.com/onflow/flow-go/storage/store" @@ -198,6 +199,7 @@ func GenericNodeWithStateFixture(t testing.TB, Tracer: tracer, PublicDB: stateFixture.PublicDB, SecretsDB: stateFixture.SecretsDB, + LockManager: stateFixture.LockManager, Headers: stateFixture.Storage.Headers, Guarantees: stateFixture.Storage.Guarantees, Seals: stateFixture.Storage.Seals, @@ -231,14 +233,16 @@ func CompleteStateFixture( dataDir := unittest.TempDir(t) publicDBDir := filepath.Join(dataDir, "protocol") secretsDBDir := filepath.Join(dataDir, "secrets") - db := unittest.TypedBadgerDB(t, publicDBDir, storage.InitPublic) - s := storage.InitAll(metric, db) - secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storage.InitSecret) + db := unittest.TypedBadgerDB(t, publicDBDir, storagebadger.InitPublic) + lockManager := storage.NewTestingLockManager() + s := storagebadger.InitAll(metric, db) + secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storagebadger.InitSecret) consumer := events.NewDistributor() state, err := badgerstate.Bootstrap( metric, - db, + badgerimpl.ToDB(db), + lockManager, s.Headers, s.Seals, s.Results, @@ -273,12 +277,12 @@ func CompleteStateFixture( DBDir: dataDir, ProtocolEvents: consumer, State: mutableState, + LockManager: lockManager, } } // CollectionNode returns a mock collection node. 
func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, rootSnapshot protocol.Snapshot) testmock.CollectionNode { - node := GenericNode(t, hub, identity, rootSnapshot) privKeys, err := identity.PrivateKeys() require.NoError(t, err) @@ -289,9 +293,11 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro func(_ uint64) mempool.Transactions { return herocache.NewTransactions(1000, node.Log, metrics.NewNoopCollector()) }) - transactions := storage.NewTransactions(node.Metrics, node.PublicDB) - collections := storage.NewCollections(node.PublicDB, transactions) - clusterPayloads := storage.NewClusterPayloads(node.Metrics, node.PublicDB) + + db := badgerimpl.ToDB(node.PublicDB) + transactions := store.NewTransactions(node.Metrics, db) + collections := store.NewCollections(db, transactions) + clusterPayloads := store.NewClusterPayloads(node.Metrics, db) ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig(), ingest.NewAddressRateLimiter(rate.Limit(1), 10)) // 10 tps @@ -319,15 +325,17 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro require.NoError(t, err) clusterStateFactory, err := factories.NewClusterStateFactory( - node.PublicDB, + db, + node.LockManager, node.Metrics, node.Tracer, ) require.NoError(t, err) builderFactory, err := factories.NewBuilderFactory( - node.PublicDB, + db, node.State, + node.LockManager, node.Headers, node.Tracer, node.Metrics, @@ -364,7 +372,7 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro hotstuffFactory, err := factories.NewHotStuffFactory( node.Log, node.Me, - badgerimpl.ToDB(node.PublicDB), + db, node.State, node.Metrics, node.Metrics, @@ -431,7 +439,7 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide db := badgerimpl.ToDB(node.PublicDB) resultsDB := store.NewExecutionResults(node.Metrics, db) - receiptsDB := store.NewExecutionReceipts(node.Metrics, db, resultsDB, storage.DefaultCacheSize) + receiptsDB := store.NewExecutionReceipts(node.Metrics, db, resultsDB, storagebadger.DefaultCacheSize) guarantees, err := stdmap.NewGuarantees(1000) require.NoError(t, err) @@ -448,7 +456,7 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide require.NoError(t, err) // request receipts from execution nodes - receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) + receiptRequester, err := requester.New(node.Log.With().Str("entity", "receipt").Logger(), node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) require.NoError(t, err) assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State) @@ -532,18 +540,18 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide node := GenericNodeFromParticipants(t, hub, identity, identities, chainID) db := badgerimpl.ToDB(node.PublicDB) - transactionsStorage := storage.NewTransactions(node.Metrics, node.PublicDB) - collectionsStorage := storage.NewCollections(node.PublicDB, transactionsStorage) + transactionsStorage := store.NewTransactions(node.Metrics, db) + collectionsStorage := store.NewCollections(db, transactionsStorage) eventsStorage 
:= store.NewEvents(node.Metrics, db) serviceEventsStorage := store.NewServiceEvents(node.Metrics, db) - txResultStorage := store.NewTransactionResults(node.Metrics, db, storage.DefaultCacheSize) + txResultStorage := store.NewTransactionResults(node.Metrics, db, storagebadger.DefaultCacheSize) commitsStorage := store.NewCommits(node.Metrics, db) chunkDataPackStorage := store.NewChunkDataPacks(node.Metrics, db, collectionsStorage, 100) results := store.NewExecutionResults(node.Metrics, db) - receipts := store.NewExecutionReceipts(node.Metrics, db, results, storage.DefaultCacheSize) + receipts := store.NewExecutionReceipts(node.Metrics, db, results, storagebadger.DefaultCacheSize) myReceipts := store.NewMyExecutionReceipts(node.Metrics, db, receipts) versionBeacons := store.NewVersionBeacons(db) - headersStorage := storage.NewHeaders(node.Metrics, node.PublicDB) + headersStorage := store.NewHeaders(node.Metrics, db) checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID()) @@ -609,7 +617,7 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide require.Equal(t, fmt.Sprint(rootSeal.FinalState), fmt.Sprint(commit)) require.Equal(t, rootSeal.ResultID, rootResult.ID()) - err = bootstrapper.BootstrapExecutionDatabase(db, rootSeal) + err = bootstrapper.BootstrapExecutionDatabase(node.LockManager, db, rootSeal) require.NoError(t, err) registerDir := unittest.TempPebblePath(t) @@ -640,15 +648,17 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide } return final.Height, nil } + execState := executionState.NewExecutionState( ls, commitsStorage, node.Blocks, node.Headers, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, db, getLatestFinalized, node.Tracer, // TODO: test with register store registerStore, storehouseEnabled, + node.LockManager, ) requestEngine, err := requester.New( - node.Log, node.Metrics, node.Net, node.Me, node.State, + node.Log.With().Str("entity", "collection").Logger(), node.Metrics, node.Net, node.Me, node.State, channels.RequestCollections, filter.HasRole[flow.Identity](flow.RoleCollection), func() flow.Entity { return &flow.Collection{} }, @@ -849,12 +859,13 @@ func getRoot(t *testing.T, node *testmock.GenericNode) (*flow.Header, *flow.Quor signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs) require.NoError(t, err) - rootQC := &flow.QuorumCertificate{ + rootQC, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: rootHead.View, BlockID: rootHead.ID(), SignerIndices: signerIndices, SigData: unittest.SignatureFixture(), - } + }) + require.NoError(t, err) return rootHead, rootQC } @@ -920,7 +931,7 @@ func createFollowerCore( rootHead *flow.Header, rootQC *flow.QuorumCertificate, ) (module.HotStuffFollower, *confinalizer.Finalizer) { - finalizer := confinalizer.NewFinalizer(node.PublicDB, node.Headers, followerState, trace.NewNoopTracer()) + finalizer := confinalizer.NewFinalizer(badgerimpl.ToDB(node.PublicDB).Reader(), node.Headers, followerState, trace.NewNoopTracer()) pending := make([]*flow.Header, 0) @@ -995,7 +1006,7 @@ func VerificationNode(t testing.TB, db := badgerimpl.ToDB(node.PublicDB) results := store.NewExecutionResults(node.Metrics, db) node.Results = results - node.Receipts = store.NewExecutionReceipts(node.Metrics, db, results, storage.DefaultCacheSize) + node.Receipts = store.NewExecutionReceipts(node.Metrics, db, 
results, storagebadger.DefaultCacheSize) } if node.ProcessedChunkIndex == nil { @@ -1013,7 +1024,6 @@ func VerificationNode(t testing.TB, if node.ProcessedBlockHeight == nil { node.ProcessedBlockHeight = store.NewConsumerProgress(badgerimpl.ToDB(node.PublicDB), module.ConsumeProgressVerificationBlockHeight) } - if node.VerifierEngine == nil { vm := fvm.NewVirtualMachine() @@ -1027,7 +1037,7 @@ func VerificationNode(t testing.TB, chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Log) - approvalStorage := store.NewResultApprovals(node.Metrics, badgerimpl.ToDB(node.PublicDB)) + approvalStorage := store.NewResultApprovals(node.Metrics, badgerimpl.ToDB(node.PublicDB), node.LockManager) node.VerifierEngine, err = verifier.New(node.Log, collector, @@ -1036,7 +1046,9 @@ func VerificationNode(t testing.TB, node.State, node.Me, chunkVerifier, - approvalStorage) + approvalStorage, + node.LockManager, + ) require.NoError(t, err) } diff --git a/engine/verification/assigner/engine_test.go b/engine/verification/assigner/engine_test.go index c1e2edd3c00..871bcbe0130 100644 --- a/engine/verification/assigner/engine_test.go +++ b/engine/verification/assigner/engine_test.go @@ -162,7 +162,13 @@ func newBlockHappyPath(t *testing.T) { vertestutils.WithAssignee(t, s.myID()))) result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 1) // one chunk should be assigned // mocks processing assigned chunks @@ -267,7 +273,12 @@ func newBlockNoChunk(t *testing.T) { containerBlock, assignment := createContainerBlock() result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 0) // no chunk should be assigned // once assigner engine is done processing the block, it should notify the processing notifier. @@ -307,7 +318,12 @@ func newBlockNoAssignedChunk(t *testing.T) { vertestutils.WithAssignee(t, unittest.IdentifierFixture()))) // assigned to others result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 0) // no chunk should be assigned // once assigner engine is done processing the block, it should notify the processing notifier. 
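These test hunks all apply the same migration: the positional constructor `flow.NewIncorporatedResult(blockID, result)` is replaced by a validating constructor that takes a `flow.UntrustedIncorporatedResult` wrapper and returns an error the caller must check. A minimal, self-contained sketch of that constructor pattern — using a hypothetical `Thing` type rather than the actual flow-go definitions — looks like this:

```go
package main

import (
	"errors"
	"fmt"
)

// UntrustedThing mirrors the Untrusted* wrapper shape: a plain struct carrying
// not-yet-validated input fields. (Hypothetical type, for illustration only.)
type UntrustedThing struct {
	ID    string
	Count uint64
}

// Thing is the validated model type; callers obtain it only via NewThing.
type Thing struct {
	id    string
	count uint64
}

// NewThing checks the untrusted input against the type's invariants and
// returns an error instead of silently accepting malformed fields.
func NewThing(u UntrustedThing) (*Thing, error) {
	if u.ID == "" {
		return nil, errors.New("ID must not be empty")
	}
	if u.Count == 0 {
		return nil, errors.New("Count must be positive")
	}
	return &Thing{id: u.ID, count: u.Count}, nil
}

func main() {
	// Call sites migrate from a positional constructor to the wrapper form
	// and must handle the error, as the updated tests do with require.NoError.
	thing, err := NewThing(UntrustedThing{ID: "abc", Count: 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(thing.id, thing.count)
}
```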
@@ -347,7 +363,12 @@ func newBlockMultipleAssignment(t *testing.T) { vertestutils.WithAssignee(t, s.myID()))) // assigned to me result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 3) // 3 chunks should be assigned // mocks processing assigned chunks @@ -386,7 +407,12 @@ func chunkQueueUnhappyPathDuplicate(t *testing.T) { vertestutils.WithChunks(vertestutils.WithAssignee(t, s.myID()))) result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 1) // mocks processing assigned chunks diff --git a/engine/verification/fetcher/chunkconsumer/consumer_test.go b/engine/verification/fetcher/chunkconsumer/consumer_test.go index 91f4ec23dbf..d7a23dbc877 100644 --- a/engine/verification/fetcher/chunkconsumer/consumer_test.go +++ b/engine/verification/fetcher/chunkconsumer/consumer_test.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" - storage "github.com/onflow/flow-go/storage/badger" + storage "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" @@ -42,7 +42,7 @@ func TestProduceConsume(t *testing.T) { defer lock.Unlock() called = append(called, locator) } - WithConsumer(t, neverFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, neverFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() locators := unittest.ChunkLocatorListFixture(10) @@ -78,7 +78,7 @@ func TestProduceConsume(t *testing.T) { finishAll.Done() }() } - WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() locators := unittest.ChunkLocatorListFixture(10) @@ -113,7 +113,7 @@ func TestProduceConsume(t *testing.T) { finishAll.Done() }() } - WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() total := atomic.NewUint32(0) @@ -141,13 +141,15 @@ func TestProduceConsume(t *testing.T) { func WithConsumer( t *testing.T, process func(module.ProcessingNotifier, *chunks.Locator), - withConsumer func(*chunkconsumer.ChunkConsumer, *storage.ChunksQueue), + withConsumer func(*chunkconsumer.ChunkConsumer, storage.ChunksQueue), ) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithBadgerDB(t, func(badgerdb *badger.DB) { maxProcessing := 
uint64(3) + db := badgerimpl.ToDB(badgerdb) - processedIndex := store.NewConsumerProgress(badgerimpl.ToDB(db), module.ConsumeProgressVerificationChunkIndex) - chunksQueue := storage.NewChunkQueue(db) + collector := &metrics.NoopCollector{} + processedIndex := store.NewConsumerProgress(db, module.ConsumeProgressVerificationChunkIndex) + chunksQueue := store.NewChunkQueue(collector, db) ok, err := chunksQueue.Init(chunkconsumer.DefaultJobIndex) require.NoError(t, err) require.True(t, ok) @@ -156,7 +158,6 @@ func WithConsumer( process: process, } - collector := &metrics.NoopCollector{} consumer, err := chunkconsumer.NewChunkConsumer( unittest.Logger(), collector, diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index b12ef5637a9..be3bf84203f 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -572,18 +572,23 @@ func (e *Engine) requestChunkDataPack(chunkIndex uint64, chunkID flow.Identifier return fmt.Errorf("could not fetch execution node ids at block %x: %w", blockID, err) } - request := &verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ - ResultID: resultID, - Index: chunkIndex, - }, - ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ - ChunkID: chunkID, - Height: header.Height, - Agrees: agrees, - Disagrees: disagrees, - Targets: allExecutors, + request, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: chunks.Locator{ + ResultID: resultID, + Index: chunkIndex, + }, + ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ + ChunkID: chunkID, + Height: header.Height, + Agrees: agrees, + Disagrees: disagrees, + Targets: allExecutors, + }, }, + ) + if err != nil { + return fmt.Errorf("could not construct chunk data pack request: %w", err) } e.requester.Request(request) diff --git a/engine/verification/fetcher/engine_test.go b/engine/verification/fetcher/engine_test.go index 273a76ac73f..9fa9d645bc3 100644 --- a/engine/verification/fetcher/engine_test.go +++ b/engine/verification/fetcher/engine_test.go @@ -880,9 +880,11 @@ func chunkDataPackResponseFixture(t *testing.T, ResultID: result.ID(), Index: chunk.Index, }, - Cdp: unittest.ChunkDataPackFixture(chunk.ID(), + Cdp: unittest.ChunkDataPackFixture( + chunk.ID(), unittest.WithStartState(chunk.StartState), - unittest.WithChunkDataPackCollection(collection)), + unittest.WithChunkDataPackCollection(collection), + ), } } @@ -953,11 +955,12 @@ func chunkRequestsFixture( // // Agrees and disagrees are the list of execution node identifiers that generate the same and contradicting execution result // with the execution result that chunks belong to, respectively. 
-func chunkRequestFixture(resultID flow.Identifier, +func chunkRequestFixture( + resultID flow.Identifier, status *verification.ChunkStatus, agrees flow.IdentityList, - disagrees flow.IdentityList) *verification.ChunkDataPackRequest { - + disagrees flow.IdentityList, +) *verification.ChunkDataPackRequest { return &verification.ChunkDataPackRequest{ Locator: chunks.Locator{ ResultID: resultID, diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index fddb922ec2e..f97d4f21832 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -198,12 +198,19 @@ func (e *Engine) handleChunkDataPack(originID flow.Identifier, chunkDataPack *fl } for _, locator := range locators { - response := verification.ChunkDataPackResponse{ - Locator: *locator, - Cdp: chunkDataPack, + response, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *locator, + Cdp: chunkDataPack, + }, + ) + if err != nil { + // TODO: update this engine to use SignalerContext and throw an exception here + lg.Fatal().Err(err).Msg("could not construct chunk data pack response") + return } - e.handler.HandleChunkDataPack(originID, &response) + e.handler.HandleChunkDataPack(originID, response) e.metrics.OnChunkDataPackSentToFetcher() lg.Info(). Hex("result_id", logging.ID(locator.ResultID)). diff --git a/engine/verification/verifier/engine.go b/engine/verification/verifier/engine.go index 421c9651650..ba6f8b3adea 100644 --- a/engine/verification/verifier/engine.go +++ b/engine/verification/verifier/engine.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/jordanschalm/lockctx" "github.com/onflow/crypto" "github.com/onflow/crypto/hash" "github.com/rs/zerolog" @@ -43,6 +44,7 @@ type Engine struct { chVerif module.ChunkVerifier // used to verify chunks spockHasher hash.Hasher // used for generating spocks approvals storage.ResultApprovals // used to store result approvals + lockManager lockctx.Manager } // New creates and returns a new instance of a verifier engine. 
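The verifier engine now carries a `lockctx.Manager`, the same lock-manager primitive already used by the storage updates earlier in this diff (`NewContext`, `AcquireLock`, `Release`, `HoldsLock`). A toy, self-contained sketch of that usage pattern — hypothetical types, not the real `github.com/jordanschalm/lockctx` API — including the deferred-store functor shape that `StoreMyApproval` returns in the hunks below:

```go
package main

import (
	"fmt"
	"sync"
)

// Manager and Context are toy stand-ins for a lock manager and a lock-holding
// context; illustration only, not the actual lockctx implementation.
type Manager struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

type Context struct {
	mgr  *Manager
	held []string
}

func NewManager() *Manager        { return &Manager{locks: map[string]*sync.Mutex{}} }
func (m *Manager) NewContext() *Context { return &Context{mgr: m} }

// AcquireLock blocks until the named lock is held by this context.
func (c *Context) AcquireLock(name string) error {
	c.mgr.mu.Lock()
	l, ok := c.mgr.locks[name]
	if !ok {
		l = &sync.Mutex{}
		c.mgr.locks[name] = l
	}
	c.mgr.mu.Unlock()
	l.Lock()
	c.held = append(c.held, name)
	return nil
}

// HoldsLock lets callees verify that the caller proved lock ownership.
func (c *Context) HoldsLock(name string) bool {
	for _, h := range c.held {
		if h == name {
			return true
		}
	}
	return false
}

// Release frees every lock held by this context.
func (c *Context) Release() {
	for _, name := range c.held {
		c.mgr.locks[name].Unlock()
	}
	c.held = nil
}

// storeMyApproval mirrors the deferred-functor shape: it captures the value to
// be written and returns a function that performs the write only while the
// caller holds the required lock.
func storeMyApproval(approval string) func(*Context) error {
	return func(lctx *Context) error {
		if !lctx.HoldsLock("index_result_approval") {
			return fmt.Errorf("caller does not hold the required lock")
		}
		fmt.Println("stored approval:", approval)
		return nil
	}
}

func main() {
	mgr := NewManager()
	storing := storeMyApproval("approval-1")

	lctx := mgr.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock("index_result_approval"); err != nil {
		panic(err)
	}
	if err := storing(lctx); err != nil {
		panic(err)
	}
}
```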
@@ -55,6 +57,7 @@ func New( me module.Local, chVerif module.ChunkVerifier, approvals storage.ResultApprovals, + lockManager lockctx.Manager, ) (*Engine, error) { e := &Engine{ @@ -68,6 +71,7 @@ func New( approvalHasher: utils.NewResultApprovalHasher(), spockHasher: signature.NewBLSHasher(signature.SPOCKTag), approvals: approvals, + lockManager: lockManager, } var err error @@ -265,10 +269,13 @@ func (e *Engine) verify(ctx context.Context, originID flow.Identifier, // Generate result approval span, _ = e.tracer.StartSpanFromContext(ctx, trace.VERVerGenerateResultApproval) - attestation := &flow.Attestation{ + attestation, err := flow.NewAttestation(flow.UntrustedAttestation{ BlockID: vc.Header.ID(), ExecutionResultID: vc.Result.ID(), ChunkIndex: vc.Chunk.Index, + }) + if err != nil { + return fmt.Errorf("could not build attestation: %w", err) } approval, err := GenerateResultApproval( e.me, @@ -282,16 +289,11 @@ func (e *Engine) verify(ctx context.Context, originID flow.Identifier, return fmt.Errorf("couldn't generate a result approval: %w", err) } - err = e.approvals.Store(approval) + err = e.storeApproval(approval) if err != nil { return fmt.Errorf("could not store approval: %w", err) } - err = e.approvals.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - if err != nil { - return fmt.Errorf("could not index approval: %w", err) - } - // Extracting consensus node ids // TODO state extraction should be done based on block references consensusNodes, err := e.state.Final(). @@ -314,6 +316,31 @@ func (e *Engine) verify(ctx context.Context, originID flow.Identifier, return nil } +// storeApproval stores the result approval in the database. +// Concurrency safe and guarantees that an approval for a result is never +// overwritten by a different one (enforcing protocol rule that Verifier +// must never publish two different approvals for the same chunk). +// No errors are expected during normal operations. +func (e *Engine) storeApproval(approval *flow.ResultApproval) error { + // create deferred operation for storing approval in the database + storing := e.approvals.StoreMyApproval(approval) + + lctx := e.lockManager.NewContext() + defer lctx.Release() + + err := lctx.AcquireLock(storage.LockIndexResultApproval) + if err != nil { + return fmt.Errorf("fail to acquire lock to insert result approval: %w", err) + } + + err = storing(lctx) + if err != nil { + return fmt.Errorf("could not store result approval: %w", err) + } + + return nil +} + // GenerateResultApproval generates result approval for specific chunk of an execution receipt. 
func GenerateResultApproval( me module.Local, @@ -337,11 +364,14 @@ func GenerateResultApproval( } // result approval body - body := flow.ResultApprovalBody{ + body, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ Attestation: *attestation, ApproverID: me.NodeID(), AttestationSignature: atstSign, Spock: spock, + }) + if err != nil { + return nil, fmt.Errorf("could not build result approval body: %w", err) } // generates a signature over result approval body @@ -351,10 +381,15 @@ func GenerateResultApproval( return nil, fmt.Errorf("could not sign result approval body: %w", err) } - return &flow.ResultApproval{ - Body: body, + resultApproval, err := flow.NewResultApproval(flow.UntrustedResultApproval{ + Body: *body, VerifierSignature: bodySign, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build result approval: %w", err) + } + + return resultApproval, nil } // verifiableChunkHandler acts as a wrapper around the verify method that captures its performance-related metrics diff --git a/engine/verification/verifier/engine_test.go b/engine/verification/verifier/engine_test.go index 35a8f2cdf2e..b9a5f9213b4 100644 --- a/engine/verification/verifier/engine_test.go +++ b/engine/verification/verifier/engine_test.go @@ -3,9 +3,12 @@ package verifier_test import ( "crypto/rand" "errors" + "sync/atomic" "testing" "github.com/ipfs/go-cid" + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/mock" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -25,6 +28,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/mocknetwork" protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -44,6 +48,7 @@ type VerifierEngineTestSuite struct { metrics *mockmodule.VerificationMetrics // mocks performance monitoring metrics approvals *mockstorage.ResultApprovals chunkVerifier *mockmodule.ChunkVerifier + lockManager lockctx.Manager } func TestVerifierEngine(t *testing.T) { @@ -51,6 +56,7 @@ func TestVerifierEngine(t *testing.T) { } func (suite *VerifierEngineTestSuite) SetupTest() { + suite.lockManager = storage.NewTestingLockManager() suite.state = new(protocol.State) suite.net = mocknetwork.NewNetwork(suite.T()) suite.tracer = trace.NewNoopTracer() @@ -103,7 +109,9 @@ func (suite *VerifierEngineTestSuite) getTestNewEngine() *verifier.Engine { suite.state, suite.me, suite.chunkVerifier, - suite.approvals) + suite.approvals, + suite.lockManager, + ) require.NoError(suite.T(), err) suite.net.AssertExpectations(suite.T()) @@ -145,47 +153,30 @@ func (suite *VerifierEngineTestSuite) TestVerifyHappyPath() { for _, test := range tests { suite.Run(test.name, func() { - var expectedApproval *flow.ResultApproval + var expectedApproval atomic.Pointer[flow.ResultApproval] // potentially accessed concurrently within engine suite.approvals. - On("Store", testifymock.Anything). - Return(nil). 
- Run(func(args testifymock.Arguments) { - ra, ok := args[0].(*flow.ResultApproval) - suite.Require().True(ok) - - suite.Assert().Equal(vChunk.Chunk.BlockID, ra.Body.BlockID) - suite.Assert().Equal(vChunk.Result.ID(), ra.Body.ExecutionResultID) - suite.Assert().Equal(vChunk.Chunk.Index, ra.Body.ChunkIndex) - suite.Assert().Equal(suite.me.NodeID(), ra.Body.ApproverID) - - // verifies the signatures - atstID := ra.Body.Attestation.ID() - suite.Assert().True(suite.sk.PublicKey().Verify(ra.Body.AttestationSignature, atstID[:], suite.hasher)) - bodyID := ra.Body.ID() - suite.Assert().True(suite.sk.PublicKey().Verify(ra.VerifierSignature, bodyID[:], suite.hasher)) - - // spock should be non-nil - suite.Assert().NotNil(ra.Body.Spock) - - expectedApproval = ra - }). - Once() - suite.approvals. - On("Index", testifymock.Anything, testifymock.Anything, testifymock.Anything). - Return(nil). - Run(func(args testifymock.Arguments) { - erID, ok := args[0].(flow.Identifier) - suite.Require().True(ok) - suite.Assert().Equal(expectedApproval.Body.ExecutionResultID, erID) - - chIndex, ok := args[1].(uint64) - suite.Require().True(ok) - suite.Assert().Equal(expectedApproval.Body.ChunkIndex, chIndex) - - raID, ok := args[2].(flow.Identifier) - suite.Require().True(ok) - suite.Assert().Equal(expectedApproval.ID(), raID) + On("StoreMyApproval", mock.Anything). + Return(func(ra *flow.ResultApproval) func(lockctx.Proof) error { + return func(lctx lockctx.Proof) error { + suite.Assert().True(lctx.HoldsLock(storage.LockIndexResultApproval)) + suite.Assert().Equal(vChunk.Chunk.BlockID, ra.Body.BlockID) + suite.Assert().Equal(vChunk.Result.ID(), ra.Body.ExecutionResultID) + suite.Assert().Equal(vChunk.Chunk.Index, ra.Body.ChunkIndex) + suite.Assert().Equal(suite.me.NodeID(), ra.Body.ApproverID) + + // verifies the signatures + atstID := ra.Body.Attestation.ID() + suite.Assert().True(suite.sk.PublicKey().Verify(ra.Body.AttestationSignature, atstID[:], suite.hasher)) + bodyID := ra.Body.ID() + suite.Assert().True(suite.sk.PublicKey().Verify(ra.VerifierSignature, bodyID[:], suite.hasher)) + + // spock should be non-nil + suite.Assert().NotNil(ra.Body.Spock) + + expectedApproval.Store(ra) + return nil + } }). Once() @@ -196,7 +187,7 @@ func (suite *VerifierEngineTestSuite) TestVerifyHappyPath() { // check that the approval matches the input execution result ra, ok := args[0].(*flow.ResultApproval) suite.Require().True(ok) - suite.Assert().Equal(expectedApproval, ra) + suite.Assert().Equal(expectedApproval.Load(), ra) // note: mock includes each variadic argument as a separate element in slice node, ok := args[1].(flow.Identifier) diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index acf0fee84ad..48243dacf10 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -30,8 +31,17 @@ import ( // It assumes the latest sealed block has been executed, and the chunk data packs have not been // pruned. 
// Note, it returns nil if certain block is not executed, in this case warning will be logged -func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string, nWorker uint, stopOnMismatch bool) (err error) { - closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) +func VerifyLastKHeight( + lockManager lockctx.Manager, + k uint64, + chainID flow.ChainID, + protocolDataDir string, + chunkDataPackDir string, + nWorker uint, + stopOnMismatch bool, + transactionFeesDisabled bool, +) (err error) { + closer, storages, chunkDataPacks, state, verifier, err := initStorages(lockManager, chainID, protocolDataDir, chunkDataPackDir, transactionFeesDisabled) if err != nil { return fmt.Errorf("could not init storages: %w", err) } @@ -78,13 +88,15 @@ func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, c // VerifyRange verifies all chunks in the results of the blocks in the given range. // Note, it returns nil if certain block is not executed, in this case warning will be logged func VerifyRange( + lockManager lockctx.Manager, from, to uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string, nWorker uint, stopOnMismatch bool, + transactionFeesDisabled bool, ) (err error) { - closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + closer, storages, chunkDataPacks, state, verifier, err := initStorages(lockManager, chainID, protocolDataDir, chunkDataPackDir, transactionFeesDisabled) if err != nil { return fmt.Errorf("could not init storages: %w", err) } @@ -207,24 +219,33 @@ func verifyConcurrently( return nil } -func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) ( +func initStorages( + lockManager lockctx.Manager, + chainID flow.ChainID, + dataDir string, + chunkDataPackDir string, + transactionFeesDisabled bool, +) ( func() error, - *storage.All, + *store.All, storage.ChunkDataPacks, protocol.State, module.ChunkVerifier, error, ) { - db := common.InitStorage(dataDir) + db, err := common.InitStorage(dataDir) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not init storage database: %w", err) + } storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %w", err) } // require the chunk data pack data must exist before returning the storage module - chunkDataPackDB, err := storagepebble.MustOpenDefaultPebbleDB( + chunkDataPackDB, err := storagepebble.ShouldOpenDefaultPebbleDB( log.Logger.With().Str("pebbledb", "cdp").Logger(), chunkDataPackDir) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) @@ -232,7 +253,7 @@ func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) chunkDataPacks := store.NewChunkDataPacks(metrics.NewNoopCollector(), pebbleimpl.ToDB(chunkDataPackDB), storages.Collections, 1000) - verifier := makeVerifier(log.Logger, chainID, storages.Headers) + verifier := makeVerifier(log.Logger, chainID, storages.Headers, transactionFeesDisabled) closer := func() error { var dbErr, chunkDataPackDBErr error @@ -304,10 +325,15 @@ func makeVerifier( logger zerolog.Logger, chainID flow.ChainID, 
headers storage.Headers, + transactionFeesDisabled bool, ) module.ChunkVerifier { vm := fvm.NewVirtualMachine() - fvmOptions := initialize.InitFvmOptions(chainID, headers) + fvmOptions := initialize.InitFvmOptions( + chainID, + headers, + transactionFeesDisabled, + ) fvmOptions = append( []fvm.Option{fvm.WithLogger(logger)}, fvmOptions..., diff --git a/follower/consensus_follower.go b/follower/consensus_follower.go index e226db22553..03bd451ec1e 100644 --- a/follower/consensus_follower.go +++ b/follower/consensus_follower.go @@ -5,8 +5,9 @@ import ( "fmt" "sync" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/crypto" "github.com/rs/zerolog" @@ -44,6 +45,9 @@ type Config struct { exposeMetrics bool // whether to expose metrics syncConfig *chainsync.Config // sync core configuration complianceConfig *compliance.Config // follower engine configuration + // lock manager for the follower; allows integration tests that run multiple followers + // to use different lock managers. + lockManager lockctx.Manager } type Option func(c *Config) @@ -91,6 +95,15 @@ func WithComplianceConfig(config *compliance.Config) Option { } } +func WithLockManager(lockManager lockctx.Manager) Option { + return func(c *Config) { + if c.lockManager != nil { + panic("lock manager already set, cannot overwrite") + } + c.lockManager = lockManager + } +} + // BootstrapNodeInfo contains the details about the upstream bootstrap peer the consensus follower uses type BootstrapNodeInfo struct { Host string // ip or hostname @@ -117,6 +130,7 @@ func getFollowerServiceOptions(config *Config) []FollowerOption { WithBootStrapPeers(ids...), WithBaseOptions(getBaseOptions(config)), WithNetworkKey(config.networkPrivKey), + WithStorageLockManager(config.lockManager), } } @@ -186,6 +200,7 @@ func NewConsensusFollower( pebbleDB: nil, logLevel: "info", exposeMetrics: false, + lockManager: nil, // default to nil, can be set optionally with WithLockManager in tests } for _, opt := range opts { diff --git a/follower/database/init.go b/follower/database/init.go index 92b85546dab..eaff70b085b 100644 --- a/follower/database/init.go +++ b/follower/database/init.go @@ -3,7 +3,7 @@ package database import ( "io" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/onflow/flow-go/cmd/scaffold" diff --git a/follower/follower_builder.go b/follower/follower_builder.go index e6359392b67..1ce3b336ffc 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "github.com/jordanschalm/lockctx" "github.com/libp2p/go-libp2p/core/peer" "github.com/onflow/crypto" "github.com/rs/zerolog" @@ -78,6 +79,7 @@ type FollowerServiceConfig struct { bootstrapIdentities flow.IdentitySkeletonList // the identity list of bootstrap peers the node uses to discover other nodes NetworkKey crypto.PrivateKey // the networking key passed in by the caller when being used as a library baseOptions []cmd.Option + lockManager lockctx.Manager // the lock manager used by the follower service, can be nil if not used } // DefaultFollowerServiceConfig defines all the default values for the FollowerServiceConfig @@ -199,7 +201,7 @@ func (builder *FollowerServiceBuilder) buildFollowerCore() *FollowerServiceBuild builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the
protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) followerCore, err := consensus.NewFollower( node.Logger, @@ -329,6 +331,14 @@ func WithNetworkKey(key crypto.PrivateKey) FollowerOption { } } +func WithStorageLockManager(lockManager lockctx.Manager) FollowerOption { + return func(config *FollowerServiceConfig) { + // LockManager is not used in the follower service, but we keep this option for compatibility + // with the staked node builder. + config.lockManager = lockManager + } +} + func WithBaseOptions(baseOptions []cmd.Option) FollowerOption { return func(config *FollowerServiceConfig) { config.baseOptions = baseOptions @@ -350,6 +360,7 @@ func FlowConsensusFollowerService(opts ...FollowerOption) *FollowerServiceBuilde // the observer gets a version of the root snapshot file that does not contain any node addresses // hence skip all the root snapshot validations that involved an identity address ret.FlowNodeBuilder.SkipNwAddressBasedValidations = true + ret.StorageLockMgr = config.lockManager return ret } diff --git a/fvm/blueprints/epochs.go b/fvm/blueprints/epochs.go index cc274bd5671..93656d24559 100644 --- a/fvm/blueprints/epochs.go +++ b/fvm/blueprints/epochs.go @@ -131,7 +131,7 @@ func RegisterNodeTransaction( flowTokenAddress flow.Address, fungibleTokenAddress flow.Address, nodeAddress flow.Address, - id *flow.Identity, + node bootstrap.NodeInfo, ) *flow.TransactionBody { env := templates.Environment{ @@ -146,8 +146,8 @@ func RegisterNodeTransaction( // Use NetworkingKey as the public key of the machine account. // We do this for tests/localnet but normally it should be a separate key. accountKey := &flowsdk.AccountKey{ - PublicKey: id.NetworkPubKey, - SigAlgo: id.NetworkPubKey.Algorithm(), + PublicKey: node.NetworkPubKey(), + SigAlgo: node.NetworkPubKey().Algorithm(), HashAlgo: bootstrap.DefaultMachineAccountHashAlgo, Weight: 1000, } @@ -168,22 +168,31 @@ func RegisterNodeTransaction( panic(err) } - cdcNodeID, err := cadence.NewString(id.NodeID.String()) + cdcNodeID, err := cadence.NewString(node.NodeID.String()) if err != nil { panic(err) } - cdcAddress, err := cadence.NewString(id.Address) + cdcAddress, err := cadence.NewString(node.Address) if err != nil { panic(err) } - cdcNetworkPubKey, err := cadence.NewString(id.NetworkPubKey.String()[2:]) + cdcNetworkPubKey, err := cadence.NewString(node.NetworkPubKey().String()[2:]) if err != nil { panic(err) } - cdcStakingPubKey, err := cadence.NewString(id.StakingPubKey.String()[2:]) + cdcStakingPubKey, err := cadence.NewString(node.StakingPubKey().String()[2:]) + if err != nil { + panic(err) + } + + pop, err := node.StakingPoP() + if err != nil { + panic(err) + } + cdcStakingKeyPoP, err := cadence.NewString(pop.String()[2:]) if err != nil { panic(err) } @@ -192,10 +201,11 @@ func RegisterNodeTransaction( return flow.NewTransactionBody(). SetScript(templates.GenerateEpochRegisterNodeScript(env)). AddArgument(jsoncdc.MustEncode(cdcNodeID)). - AddArgument(jsoncdc.MustEncode(cadence.NewUInt8(uint8(id.Role)))). + AddArgument(jsoncdc.MustEncode(cadence.NewUInt8(uint8(node.Role)))). AddArgument(jsoncdc.MustEncode(cdcAddress)). AddArgument(jsoncdc.MustEncode(cdcNetworkPubKey)). AddArgument(jsoncdc.MustEncode(cdcStakingPubKey)). + AddArgument(jsoncdc.MustEncode(cdcStakingKeyPoP)). 
AddArgument(jsoncdc.MustEncode(cdcAmount)). AddArgument(jsoncdc.MustEncode(cadencePublicKeys)). AddAuthorizer(nodeAddress) diff --git a/fvm/blueprints/scheduled_callback.go b/fvm/blueprints/scheduled_callback.go new file mode 100644 index 00000000000..e4848a9af4b --- /dev/null +++ b/fvm/blueprints/scheduled_callback.go @@ -0,0 +1,115 @@ +package blueprints + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-core-contracts/lib/go/templates" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +const callbackTransactionGasLimit = flow.DefaultMaxTransactionGasLimit + +func ProcessCallbacksTransaction(chain flow.Chain) *flow.TransactionBody { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + script := templates.GenerateProcessCallbackScript(sc.AsTemplateEnv()) + + return flow.NewTransactionBody(). + SetScript(script). + SetComputeLimit(callbackTransactionGasLimit) +} + +func ExecuteCallbacksTransactions(chainID flow.Chain, processEvents flow.EventsList) ([]*flow.TransactionBody, error) { + txs := make([]*flow.TransactionBody, 0, len(processEvents)) + env := systemcontracts.SystemContractsForChain(chainID.ChainID()).AsTemplateEnv() + + for _, event := range processEvents { + id, effort, err := callbackArgsFromEvent(env, event) + if err != nil { + return nil, fmt.Errorf("failed to get callback args from event: %w", err) + } + + tx := executeCallbackTransaction(env, id, effort) + txs = append(txs, tx) + } + + return txs, nil +} + +func executeCallbackTransaction(env templates.Environment, id []byte, effort uint64) *flow.TransactionBody { + script := templates.GenerateExecuteCallbackScript(env) + + return flow.NewTransactionBody(). + SetScript(script). + AddArgument(id). + SetComputeLimit(effort) +} + +// callbackArgsFromEvent decodes the event payload and returns the callback ID and effort. 
+// +// The event for processed callback event is emitted by the process callback transaction from +// callback scheduler contract and has the following signature: +// event CallbackProcessed(ID: UInt64, executionEffort: UInt64) +func callbackArgsFromEvent(env templates.Environment, event flow.Event) ([]byte, uint64, error) { + const ( + processedCallbackIDFieldName = "ID" + processedCallbackEffortFieldName = "executionEffort" + processedEventTypeTemplate = "A.%v.CallbackScheduler.CallbackProcessed" + ) + + scheduledContractAddress := env.FlowCallbackSchedulerAddress + processedEventType := flow.EventType(fmt.Sprintf(processedEventTypeTemplate, scheduledContractAddress)) + + if event.Type != processedEventType { + return nil, 0, fmt.Errorf("wrong event type is passed") + } + + eventData, err := ccf.Decode(nil, event.Payload) + if err != nil { + return nil, 0, fmt.Errorf("failed to decode event: %w", err) + } + + cadenceEvent, ok := eventData.(cadence.Event) + if !ok { + return nil, 0, fmt.Errorf("event data is not a cadence event") + } + + idValue := cadence.SearchFieldByName( + cadenceEvent, + processedCallbackIDFieldName, + ) + + effortValue := cadence.SearchFieldByName( + cadenceEvent, + processedCallbackEffortFieldName, + ) + + id, ok := idValue.(cadence.UInt64) + if !ok { + return nil, 0, fmt.Errorf("id is not uint64") + } + + cadenceEffort, ok := effortValue.(cadence.UInt64) + if !ok { + return nil, 0, fmt.Errorf("effort is not uint64") + } + + effort := uint64(cadenceEffort) + + if effort > flow.DefaultMaxTransactionGasLimit { + log.Warn().Uint64("effort", effort).Msg("effort is greater than max transaction gas limit, setting to max") + effort = flow.DefaultMaxTransactionGasLimit + } + + encodedID, err := ccf.Encode(id) + if err != nil { + return nil, 0, fmt.Errorf("failed to encode id: %w", err) + } + + return encodedID, effort, nil +} diff --git a/fvm/blueprints/scheduled_callback_test.go b/fvm/blueprints/scheduled_callback_test.go new file mode 100644 index 00000000000..50a37f117c0 --- /dev/null +++ b/fvm/blueprints/scheduled_callback_test.go @@ -0,0 +1,192 @@ +package blueprints_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + cadenceCommon "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProcessCallbacksTransaction(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + tx := blueprints.ProcessCallbacksTransaction(chain) + + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + assert.Equal(t, uint64(flow.DefaultMaxTransactionGasLimit), tx.GasLimit) + assert.Empty(t, tx.Arguments) +} + +func TestExecuteCallbacksTransactions(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + + tests := []struct { + name string + events []flow.Event + expectedTxs int + expectError bool + errorMessage string + }{ + { + name: "no events", + events: []flow.Event{}, + expectedTxs: 0, + expectError: false, + }, + { + name: "single valid event", + events: []flow.Event{createValidCallbackEvent(t, 1, 100)}, + expectedTxs: 1, + expectError: false, + }, + { + name: "multiple valid events", + events: []flow.Event{ + createValidCallbackEvent(t, 1, 100), + createValidCallbackEvent(t, 2, 200), + createValidCallbackEvent(t, 3, 300), + }, + expectedTxs: 3, + 
expectError: false, + }, + { + name: "invalid event type", + events: []flow.Event{createInvalidTypeEvent()}, + expectedTxs: 0, + expectError: true, + errorMessage: "failed to get callback args from event", + }, + { + name: "invalid event payload", + events: []flow.Event{createInvalidPayloadEvent()}, + expectedTxs: 0, + expectError: true, + errorMessage: "failed to get callback args from event", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + txs, err := blueprints.ExecuteCallbacksTransactions(chain, tt.events) + + if tt.expectError { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMessage) + assert.Nil(t, txs) + return + } + + assert.NoError(t, err) + assert.Len(t, txs, tt.expectedTxs) + + for i, tx := range txs { + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + expectedEffort := uint64(100 * (i + 1)) // Events created with efforts 100, 200, 300 + assert.Equal(t, expectedEffort, tx.GasLimit) + assert.Len(t, tx.Arguments, 1) + assert.NotEmpty(t, tx.Arguments[0]) + + t.Logf("Transaction %d: ID arg length: %d, GasLimit: %d", + i, len(tx.Arguments[0]), tx.GasLimit) + } + }) + } +} + +func TestExecuteCallbackTransaction(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + + const id = 123 + const effort = 456 + event := createValidCallbackEvent(t, id, effort) + txs, err := blueprints.ExecuteCallbacksTransactions(chain, []flow.Event{event}) + + require.NoError(t, err) + require.Len(t, txs, 1) + + tx := txs[0] + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + assert.Equal(t, uint64(effort), tx.GasLimit) + assert.Len(t, tx.Arguments, 1) + + expectedEncodedID, err := ccf.Encode(cadence.NewUInt64(id)) + require.NoError(t, err) + assert.Equal(t, tx.Arguments[0], expectedEncodedID) + + assert.Equal(t, tx.GasLimit, uint64(effort)) +} + +func createValidCallbackEvent(t *testing.T, id uint64, effort uint64) flow.Event { + const processedEventTypeTemplate = "A.%v.CallbackScheduler.CallbackProcessed" + env := systemcontracts.SystemContractsForChain(flow.Mainnet.Chain().ChainID()).AsTemplateEnv() + eventTypeString := fmt.Sprintf(processedEventTypeTemplate, env.FlowCallbackSchedulerAddress) + loc, err := cadenceCommon.HexToAddress(env.FlowCallbackSchedulerAddress) + require.NoError(t, err) + location := cadenceCommon.NewAddressLocation(nil, loc, "CallbackProcessed") + + eventType := cadence.NewEventType( + location, + "CallbackProcessed", + []cadence.Field{ + {Identifier: "ID", Type: cadence.UInt64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + }, + nil, + ) + + event := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(id), + cadence.NewUInt64(effort), + }, + ).WithType(eventType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} + +func createInvalidTypeEvent() flow.Event { + return flow.Event{ + Type: flow.EventType("A.0000000000000000.SomeContract.WrongEvent"), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: []byte("invalid"), + } +} + +func createInvalidPayloadEvent() flow.Event { + return flow.Event{ + Type: flow.EventType("A.0000000000000000.CallbackScheduler.CallbackProcessed"), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: []byte("not valid ccf"), + } +} diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go 
index b7d5df580da..60ef874f183 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -22,6 +22,7 @@ import ( "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/logical" "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" ) @@ -109,7 +110,7 @@ type BootstrapParams struct { // list of initial network participants for whom we will create/stake flow // accounts and retrieve epoch-related resources - identities flow.IdentityList + nodes []bootstrap.NodeInfo } type BootstrapAccountKeys struct { @@ -200,9 +201,9 @@ func WithRootBlock(rootBlock *flow.Header) BootstrapProcedureOption { } } -func WithIdentities(identities flow.IdentityList) BootstrapProcedureOption { +func WithNodes(nodes []bootstrap.NodeInfo) BootstrapProcedureOption { return func(bp *BootstrapProcedure) *BootstrapProcedure { - bp.identities = identities + bp.nodes = nodes return bp } } @@ -453,6 +454,11 @@ func (b *bootstrapExecutor) Execute() error { // deploy staking collection contract to the service account b.deployStakingCollection(service, &env) + if b.ctx.ScheduleCallbacksEnabled { + // deploy flow callback scheduler contract to the service account + b.deployFlowCallbackScheduler(service, &env) + } + // sets up the EVM environment b.setupEVM(service, nonFungibleToken, fungibleToken, flowToken, &env) b.setupVMBridge(service, &env) @@ -467,7 +473,7 @@ func (b *bootstrapExecutor) Execute() error { b.registerNodes(service, fungibleToken, flowToken) // set the list of nodes which are allowed to stake in this network - b.setStakingAllowlist(service, b.identities.NodeIDs()) + b.setStakingAllowlist(service, bootstrap.ToIdentityList(b.nodes).NodeIDs()) b.deployMigrationContract(service) @@ -805,6 +811,16 @@ func (b *bootstrapExecutor) deployNFTStorefrontV2(deployTo flow.Address, env *te panicOnMetaInvokeErrf("failed to deploy NFTStorefrontV2 contract: %s", txError, err) } +func (b *bootstrapExecutor) deployFlowCallbackScheduler(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FlowCallbackScheduler(*env) + txError, err := b.invokeMetaTransaction( + b.ctx, + Transaction(blueprints.DeployContractTransaction(deployTo, contract, "FlowCallbackScheduler"), 0), + ) + + panicOnMetaInvokeErrf("failed to deploy FlowCallbackScheduler contract: %s", txError, err) +} + func (b *bootstrapExecutor) mintInitialTokens( service, fungibleToken, flowToken flow.Address, initialSupply cadence.UFix64, @@ -1277,7 +1293,7 @@ func getContractAddressFromEVMEvent(output ProcedureOutput) (string, error) { } func (b *bootstrapExecutor) registerNodes(service, fungibleToken, flowToken flow.Address) { - for _, id := range b.identities { + for _, node := range b.nodes { // create a staking account for the node nodeAddress := b.createAccount(b.accountKeys.NodeAccountPublicKeys) @@ -1316,7 +1332,7 @@ func (b *bootstrapExecutor) registerNodes(service, fungibleToken, flowToken flow flowToken, fungibleToken, nodeAddress, - id), + node), 0), ) panicOnMetaInvokeErrf("failed to register node: %s", txError, err) diff --git a/fvm/context.go b/fvm/context.go index 37440880811..1353a609522 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -30,6 +30,7 @@ type Context struct { // limits and set them to MaxUint64, effectively disabling these limits. 
DisableMemoryAndInteractionLimits bool EVMEnabled bool + ScheduleCallbacksEnabled bool ComputationLimit uint64 MemoryLimit uint64 MaxStateKeySize uint64 @@ -403,3 +404,11 @@ func WithProtocolStateSnapshot(snapshot protocol.SnapshotExecutionSubset) Option return ctx } } + +// WithScheduleCallbacksEnabled enables execution of scheduled callbacks. +func WithScheduleCallbacksEnabled(enabled bool) Option { + return func(ctx Context) Context { + ctx.ScheduleCallbacksEnabled = enabled + return ctx + } +} diff --git a/fvm/environment/account_info.go b/fvm/environment/account_info.go index 5b97669a57f..81a90106da9 100644 --- a/fvm/environment/account_info.go +++ b/fvm/environment/account_info.go @@ -312,7 +312,7 @@ func (info *accountInfo) GetAccountKeys( ) { defer info.tracer.StartChildSpan(trace.FVMEnvGetAccountKeys).End() - accountKeys, err := info.accounts.GetPublicKeys(address) + accountKeys, err := info.accounts.GetAccountPublicKeys(address) if err != nil { return nil, err @@ -330,7 +330,7 @@ func (info *accountInfo) GetAccountKeyByIndex( ) { defer info.tracer.StartChildSpan(trace.FVMEnvGetAccountKey).End() - accountKey, err := info.accounts.GetPublicKey(address, index) + accountKey, err := info.accounts.GetAccountPublicKey(address, index) if err != nil { return nil, err diff --git a/fvm/environment/account_key_reader.go b/fvm/environment/account_key_reader.go index c85d039f086..89c61b19205 100644 --- a/fvm/environment/account_key_reader.go +++ b/fvm/environment/account_key_reader.go @@ -120,7 +120,7 @@ func (reader *accountKeyReader) GetAccountKey( address := flow.ConvertAddress(runtimeAddress) // address verification is also done in this step - accountPublicKey, err := reader.accounts.GetPublicKey( + accountPublicKey, err := reader.accounts.GetAccountPublicKey( address, keyIndex) if err != nil { @@ -167,7 +167,7 @@ func (reader *accountKeyReader) AccountKeysCount( } // address verification is also done in this step - keyCount, err := reader.accounts.GetPublicKeyCount( + keyCount, err := reader.accounts.GetAccountPublicKeyCount( flow.ConvertAddress(runtimeAddress)) return keyCount, err diff --git a/fvm/environment/account_key_updater.go b/fvm/environment/account_key_updater.go index 9797c8b3890..d8bca16f504 100644 --- a/fvm/environment/account_key_updater.go +++ b/fvm/environment/account_key_updater.go @@ -237,7 +237,7 @@ func (updater *accountKeyUpdater) addAccountKey( errors.NewAccountNotFoundError(address)) } - keyIndex, err := updater.accounts.GetPublicKeyCount(address) + keyIndex, err := updater.accounts.GetAccountPublicKeyCount(address) if err != nil { return nil, fmt.Errorf("adding account key failed: %w", err) } @@ -251,7 +251,7 @@ func (updater *accountKeyUpdater) addAccountKey( return nil, fmt.Errorf("adding account key failed: %w", err) } - err = updater.accounts.AppendPublicKey(address, *accountPublicKey) + err = updater.accounts.AppendAccountPublicKey(address, *accountPublicKey) if err != nil { return nil, fmt.Errorf("adding account key failed: %w", err) } @@ -293,7 +293,7 @@ func (updater *accountKeyUpdater) revokeAccountKey( } var publicKey flow.AccountPublicKey - publicKey, err = updater.accounts.GetPublicKey( + publicKey, err = updater.accounts.GetAccountPublicKey( address, keyIndex) if err != nil { @@ -310,7 +310,7 @@ func (updater *accountKeyUpdater) revokeAccountKey( // mark this key as revoked publicKey.Revoked = true - _, err = updater.accounts.SetPublicKey( + _, err = updater.accounts.SetAccountPublicKey( address, keyIndex, publicKey) diff --git 
a/fvm/environment/account_key_updater_test.go b/fvm/environment/account_key_updater_test.go index 484740f0484..43def565a87 100644 --- a/fvm/environment/account_key_updater_test.go +++ b/fvm/environment/account_key_updater_test.go @@ -151,21 +151,23 @@ var _ environment.Accounts = &FakeAccounts{} func (f FakeAccounts) Exists(address flow.Address) (bool, error) { return true, nil } func (f FakeAccounts) Get(address flow.Address) (*flow.Account, error) { return &flow.Account{}, nil } -func (f FakeAccounts) GetPublicKeyCount(_ flow.Address) (uint32, error) { +func (f FakeAccounts) GetAccountPublicKeyCount(_ flow.Address) (uint32, error) { return f.keyCount, nil } -func (f FakeAccounts) AppendPublicKey(_ flow.Address, _ flow.AccountPublicKey) error { return nil } -func (f FakeAccounts) GetPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { +func (f FakeAccounts) AppendAccountPublicKey(_ flow.Address, _ flow.AccountPublicKey) error { + return nil +} +func (f FakeAccounts) GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { if keyIndex >= f.keyCount { return flow.AccountPublicKey{}, errors.NewAccountPublicKeyNotFoundError(address, keyIndex) } return FakePublicKey{}.toAccountPublicKey(), nil } -func (f FakeAccounts) SetPublicKey(_ flow.Address, _ uint32, _ flow.AccountPublicKey) ([]byte, error) { +func (f FakeAccounts) SetAccountPublicKey(_ flow.Address, _ uint32, _ flow.AccountPublicKey) ([]byte, error) { return nil, nil } -func (f FakeAccounts) GetPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { +func (f FakeAccounts) GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { return make([]flow.AccountPublicKey, f.keyCount), nil } func (f FakeAccounts) GetContractNames(_ flow.Address) ([]string, error) { return nil, nil } diff --git a/fvm/environment/accounts.go b/fvm/environment/accounts.go index cdde5154b9c..576742bf97d 100644 --- a/fvm/environment/accounts.go +++ b/fvm/environment/accounts.go @@ -23,11 +23,11 @@ const ( type Accounts interface { Exists(address flow.Address) (bool, error) Get(address flow.Address) (*flow.Account, error) - GetPublicKeyCount(address flow.Address) (uint32, error) - AppendPublicKey(address flow.Address, key flow.AccountPublicKey) error - GetPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) - SetPublicKey(address flow.Address, keyIndex uint32, publicKey flow.AccountPublicKey) ([]byte, error) - GetPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) + GetAccountPublicKeyCount(address flow.Address) (uint32, error) + AppendAccountPublicKey(address flow.Address, key flow.AccountPublicKey) error + GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) + SetAccountPublicKey(address flow.Address, keyIndex uint32, publicKey flow.AccountPublicKey) ([]byte, error) + GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) GetContractNames(address flow.Address) ([]string, error) GetContract(contractName string, address flow.Address) ([]byte, error) ContractExists(contractName string, address flow.Address) (bool, error) @@ -124,7 +124,7 @@ func (a *StatefulAccounts) Get(address flow.Address) (*flow.Account, error) { } var publicKeys []flow.AccountPublicKey - publicKeys, err = a.GetPublicKeys(address) + publicKeys, err = a.GetAccountPublicKeys(address) if err != nil { return nil, err } @@ -181,10 +181,10 @@ func (a *StatefulAccounts) Create( return fmt.Errorf("failed to create 
a new account: %w", err) } - return a.SetAllPublicKeys(newAddress, publicKeys) + return a.SetAllAccountPublicKeys(newAddress, publicKeys) } -func (a *StatefulAccounts) GetPublicKey( +func (a *StatefulAccounts) GetAccountPublicKey( address flow.Address, keyIndex uint32, ) ( @@ -212,7 +212,7 @@ func (a *StatefulAccounts) GetPublicKey( return decodedPublicKey, nil } -func (a *StatefulAccounts) GetPublicKeyCount( +func (a *StatefulAccounts) GetAccountPublicKeyCount( address flow.Address, ) ( uint32, @@ -222,7 +222,7 @@ func (a *StatefulAccounts) GetPublicKeyCount( if err != nil { return 0, fmt.Errorf("failed to get public key count: %w", err) } - return status.PublicKeyCount(), nil + return status.AccountPublicKeyCount(), nil } func (a *StatefulAccounts) setPublicKeyCount( @@ -237,7 +237,7 @@ func (a *StatefulAccounts) setPublicKeyCount( err) } - status.SetPublicKeyCount(count) + status.SetAccountPublicKeyCount(count) err = a.setAccountStatus(address, status) if err != nil { @@ -249,20 +249,20 @@ func (a *StatefulAccounts) setPublicKeyCount( return nil } -func (a *StatefulAccounts) GetPublicKeys( +func (a *StatefulAccounts) GetAccountPublicKeys( address flow.Address, ) ( publicKeys []flow.AccountPublicKey, err error, ) { - count, err := a.GetPublicKeyCount(address) + count, err := a.GetAccountPublicKeyCount(address) if err != nil { return nil, err } publicKeys = make([]flow.AccountPublicKey, count) for i := uint32(0); i < count; i++ { - publicKey, err := a.GetPublicKey(address, i) + publicKey, err := a.GetAccountPublicKey(address, i) if err != nil { return nil, err } @@ -273,7 +273,7 @@ func (a *StatefulAccounts) GetPublicKeys( return publicKeys, nil } -func (a *StatefulAccounts) SetPublicKey( +func (a *StatefulAccounts) SetAccountPublicKey( address flow.Address, keyIndex uint32, publicKey flow.AccountPublicKey, @@ -303,7 +303,7 @@ func (a *StatefulAccounts) SetPublicKey( return encodedPublicKey, err } -func (a *StatefulAccounts) SetAllPublicKeys( +func (a *StatefulAccounts) SetAllAccountPublicKeys( address flow.Address, publicKeys []flow.AccountPublicKey, ) error { @@ -318,7 +318,7 @@ func (a *StatefulAccounts) SetAllPublicKeys( } for i, publicKey := range publicKeys { - _, err := a.SetPublicKey(address, uint32(i), publicKey) + _, err := a.SetAccountPublicKey(address, uint32(i), publicKey) if err != nil { return err } @@ -327,7 +327,7 @@ func (a *StatefulAccounts) SetAllPublicKeys( return a.setPublicKeyCount(address, count) } -func (a *StatefulAccounts) AppendPublicKey( +func (a *StatefulAccounts) AppendAccountPublicKey( address flow.Address, publicKey flow.AccountPublicKey, ) error { @@ -344,7 +344,7 @@ func (a *StatefulAccounts) AppendPublicKey( "signature algorithm type not found") } - count, err := a.GetPublicKeyCount(address) + count, err := a.GetAccountPublicKeyCount(address) if err != nil { return err } @@ -356,7 +356,7 @@ func (a *StatefulAccounts) AppendPublicKey( MaxPublicKeyCount) } - _, err = a.SetPublicKey(address, count, publicKey) + _, err = a.SetAccountPublicKey(address, count, publicKey) if err != nil { return err } diff --git a/fvm/environment/accounts_status.go b/fvm/environment/accounts_status.go index e7726b63d42..3d3b0262fce 100644 --- a/fvm/environment/accounts_status.go +++ b/fvm/environment/accounts_status.go @@ -11,12 +11,12 @@ import ( ) const ( - flagSize = 1 - storageUsedSize = 8 - storageIndexSize = 8 - oldPublicKeyCountsSize = 8 - publicKeyCountsSize = 4 - addressIdCounterSize = 8 + flagSize = 1 + storageUsedSize = 8 + storageIndexSize = 8 + 
oldAccountPublicKeyCountsSize = 8 + accountPublicKeyCountsSize = 4 + addressIdCounterSize = 8 // accountStatusSizeV1 is the size of the account status before the address // id counter was added. After Crescendo check if it can be removed as all accounts @@ -24,7 +24,7 @@ const ( accountStatusSizeV1 = flagSize + storageUsedSize + storageIndexSize + - oldPublicKeyCountsSize + oldAccountPublicKeyCountsSize // accountStatusSizeV2 is the size of the account status before // the public key count was changed from 8 to 4 bytes long. @@ -33,20 +33,20 @@ const ( accountStatusSizeV2 = flagSize + storageUsedSize + storageIndexSize + - oldPublicKeyCountsSize + + oldAccountPublicKeyCountsSize + addressIdCounterSize accountStatusSizeV3 = flagSize + storageUsedSize + storageIndexSize + - publicKeyCountsSize + + accountPublicKeyCountsSize + addressIdCounterSize - flagIndex = 0 - storageUsedStartIndex = flagIndex + flagSize - storageIndexStartIndex = storageUsedStartIndex + storageUsedSize - publicKeyCountsStartIndex = storageIndexStartIndex + storageIndexSize - addressIdCounterStartIndex = publicKeyCountsStartIndex + publicKeyCountsSize + flagIndex = 0 + storageUsedStartIndex = flagIndex + flagSize + storageIndexStartIndex = storageUsedStartIndex + storageUsedSize + accountPublicKeyCountsStartIndex = storageIndexStartIndex + storageIndexSize + addressIdCounterStartIndex = accountPublicKeyCountsStartIndex + accountPublicKeyCountsSize ) // AccountStatus holds meta information about an account @@ -118,7 +118,7 @@ func AccountStatusFromBytes(inp []byte) (*AccountStatus, error) { cutEnd := flagSize + storageUsedSize + storageIndexSize + - (oldPublicKeyCountsSize - publicKeyCountsSize) + (oldAccountPublicKeyCountsSize - accountPublicKeyCountsSize) // check if the public key count is larger than 4 bytes for i := cutStart; i < cutEnd; i++ { @@ -128,7 +128,7 @@ func AccountStatusFromBytes(inp []byte) (*AccountStatus, error) { storageIndexSize:flagSize+ storageUsedSize+ storageIndexSize+ - oldPublicKeyCountsSize]), inp2[i]) + oldAccountPublicKeyCountsSize]), inp2[i]) } } @@ -192,14 +192,14 @@ func (a *AccountStatus) SlabIndex() atree.SlabIndex { return index } -// SetPublicKeyCount updates the public key count of the account -func (a *AccountStatus) SetPublicKeyCount(count uint32) { - binary.BigEndian.PutUint32(a[publicKeyCountsStartIndex:publicKeyCountsStartIndex+publicKeyCountsSize], count) +// SetAccountPublicKeyCount updates the account public key count of the account +func (a *AccountStatus) SetAccountPublicKeyCount(count uint32) { + binary.BigEndian.PutUint32(a[accountPublicKeyCountsStartIndex:accountPublicKeyCountsStartIndex+accountPublicKeyCountsSize], count) } -// PublicKeyCount returns the public key count of the account -func (a *AccountStatus) PublicKeyCount() uint32 { - return binary.BigEndian.Uint32(a[publicKeyCountsStartIndex : publicKeyCountsStartIndex+publicKeyCountsSize]) +// AccountPublicKeyCount returns the account public key count of the account +func (a *AccountStatus) AccountPublicKeyCount() uint32 { + return binary.BigEndian.Uint32(a[accountPublicKeyCountsStartIndex : accountPublicKeyCountsStartIndex+accountPublicKeyCountsSize]) } // SetAccountIdCounter updates id counter of the account diff --git a/fvm/environment/accounts_status_test.go b/fvm/environment/accounts_status_test.go index e9c12b375e7..1785b8bdec7 100644 --- a/fvm/environment/accounts_status_test.go +++ b/fvm/environment/accounts_status_test.go @@ -17,14 +17,14 @@ func TestAccountStatus(t *testing.T) { t.Run("test setting 
values", func(t *testing.T) { index := atree.SlabIndex{1, 2, 3, 4, 5, 6, 7, 8} s.SetStorageIndex(index) - s.SetPublicKeyCount(34) + s.SetAccountPublicKeyCount(34) s.SetStorageUsed(56) s.SetAccountIdCounter(78) require.Equal(t, uint64(56), s.StorageUsed()) returnedIndex := s.SlabIndex() require.True(t, bytes.Equal(index[:], returnedIndex[:])) - require.Equal(t, uint32(34), s.PublicKeyCount()) + require.Equal(t, uint32(34), s.AccountPublicKeyCount()) require.Equal(t, uint64(78), s.AccountIdCounter()) }) @@ -34,7 +34,7 @@ func TestAccountStatus(t *testing.T) { clone, err := environment.AccountStatusFromBytes(b) require.NoError(t, err) require.Equal(t, s.SlabIndex(), clone.SlabIndex()) - require.Equal(t, s.PublicKeyCount(), clone.PublicKeyCount()) + require.Equal(t, s.AccountPublicKeyCount(), clone.AccountPublicKeyCount()) require.Equal(t, s.StorageUsed(), clone.StorageUsed()) require.Equal(t, s.AccountIdCounter(), clone.AccountIdCounter()) @@ -60,7 +60,7 @@ func TestAccountStatus(t *testing.T) { migrated, err := environment.AccountStatusFromBytes(oldBytes) require.NoError(t, err) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, migrated.SlabIndex()) - require.Equal(t, uint32(5), migrated.PublicKeyCount()) + require.Equal(t, uint32(5), migrated.AccountPublicKeyCount()) require.Equal(t, uint64(7)+increaseInSize, migrated.StorageUsed()) require.Equal(t, uint64(0), migrated.AccountIdCounter()) }) @@ -82,7 +82,7 @@ func TestAccountStatus(t *testing.T) { migrated, err := environment.AccountStatusFromBytes(oldBytes) require.NoError(t, err) require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, migrated.SlabIndex()) - require.Equal(t, uint32(5), migrated.PublicKeyCount()) + require.Equal(t, uint32(5), migrated.AccountPublicKeyCount()) require.Equal(t, uint64(7)-decreaseInSize, migrated.StorageUsed()) require.Equal(t, uint64(3), migrated.AccountIdCounter()) }) diff --git a/fvm/environment/accounts_test.go b/fvm/environment/accounts_test.go index 41bc12a05a9..4694c77b90c 100644 --- a/fvm/environment/accounts_test.go +++ b/fvm/environment/accounts_test.go @@ -76,7 +76,7 @@ func TestAccounts_GetPublicKey(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - _, err = accounts.GetPublicKey(address, 0) + _, err = accounts.GetAccountPublicKey(address, 0) require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) } }) @@ -101,7 +101,7 @@ func TestAccounts_GetPublicKeyCount(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - count, err := accounts.GetPublicKeyCount(address) + count, err := accounts.GetAccountPublicKeyCount(address) require.NoError(t, err) require.Equal(t, uint32(0), count) } @@ -128,7 +128,7 @@ func TestAccounts_GetPublicKeys(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - keys, err := accounts.GetPublicKeys(address) + keys, err := accounts.GetAccountPublicKeys(address) require.NoError(t, err) require.Empty(t, keys) } diff --git a/fvm/environment/mock/accounts.go b/fvm/environment/mock/accounts.go index 74c9751eede..8f8d0dfe221 100644 --- a/fvm/environment/mock/accounts.go +++ b/fvm/environment/mock/accounts.go @@ -45,12 +45,12 @@ func (_m *Accounts) AllocateSlabIndex(address flow.Address) (atree.SlabIndex, er return r0, r1 } -// AppendPublicKey provides a mock function with given fields: address, key -func (_m *Accounts) AppendPublicKey(address flow.Address, key flow.AccountPublicKey) error { +// AppendAccountPublicKey provides a mock function with given fields: address, key +func (_m *Accounts) 
AppendAccountPublicKey(address flow.Address, key flow.AccountPublicKey) error { ret := _m.Called(address, key) if len(ret) == 0 { - panic("no return value specified for AppendPublicKey") + panic("no return value specified for AppendAccountPublicKey") } var r0 error @@ -213,29 +213,27 @@ func (_m *Accounts) Get(address flow.Address) (*flow.Account, error) { return r0, r1 } -// GetContract provides a mock function with given fields: contractName, address -func (_m *Accounts) GetContract(contractName string, address flow.Address) ([]byte, error) { - ret := _m.Called(contractName, address) +// GetAccountPublicKey provides a mock function with given fields: address, keyIndex +func (_m *Accounts) GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { + ret := _m.Called(address, keyIndex) if len(ret) == 0 { - panic("no return value specified for GetContract") + panic("no return value specified for GetAccountPublicKey") } - var r0 []byte + var r0 flow.AccountPublicKey var r1 error - if rf, ok := ret.Get(0).(func(string, flow.Address) ([]byte, error)); ok { - return rf(contractName, address) + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (flow.AccountPublicKey, error)); ok { + return rf(address, keyIndex) } - if rf, ok := ret.Get(0).(func(string, flow.Address) []byte); ok { - r0 = rf(contractName, address) + if rf, ok := ret.Get(0).(func(flow.Address, uint32) flow.AccountPublicKey); ok { + r0 = rf(address, keyIndex) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } + r0 = ret.Get(0).(flow.AccountPublicKey) } - if rf, ok := ret.Get(1).(func(string, flow.Address) error); ok { - r1 = rf(contractName, address) + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { + r1 = rf(address, keyIndex) } else { r1 = ret.Error(1) } @@ -243,25 +241,23 @@ func (_m *Accounts) GetContract(contractName string, address flow.Address) ([]by return r0, r1 } -// GetContractNames provides a mock function with given fields: address -func (_m *Accounts) GetContractNames(address flow.Address) ([]string, error) { +// GetAccountPublicKeyCount provides a mock function with given fields: address +func (_m *Accounts) GetAccountPublicKeyCount(address flow.Address) (uint32, error) { ret := _m.Called(address) if len(ret) == 0 { - panic("no return value specified for GetContractNames") + panic("no return value specified for GetAccountPublicKeyCount") } - var r0 []string + var r0 uint32 var r1 error - if rf, ok := ret.Get(0).(func(flow.Address) ([]string, error)); ok { + if rf, ok := ret.Get(0).(func(flow.Address) (uint32, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(flow.Address) []string); ok { + if rf, ok := ret.Get(0).(func(flow.Address) uint32); ok { r0 = rf(address) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) - } + r0 = ret.Get(0).(uint32) } if rf, ok := ret.Get(1).(func(flow.Address) error); ok { @@ -273,27 +269,29 @@ func (_m *Accounts) GetContractNames(address flow.Address) ([]string, error) { return r0, r1 } -// GetPublicKey provides a mock function with given fields: address, keyIndex -func (_m *Accounts) GetPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { - ret := _m.Called(address, keyIndex) +// GetAccountPublicKeys provides a mock function with given fields: address +func (_m *Accounts) GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { + ret := _m.Called(address) if len(ret) == 0 { - panic("no return value specified for GetPublicKey") + panic("no return 
value specified for GetAccountPublicKeys") } - var r0 flow.AccountPublicKey + var r0 []flow.AccountPublicKey var r1 error - if rf, ok := ret.Get(0).(func(flow.Address, uint32) (flow.AccountPublicKey, error)); ok { - return rf(address, keyIndex) + if rf, ok := ret.Get(0).(func(flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(address) } - if rf, ok := ret.Get(0).(func(flow.Address, uint32) flow.AccountPublicKey); ok { - r0 = rf(address, keyIndex) + if rf, ok := ret.Get(0).(func(flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(address) } else { - r0 = ret.Get(0).(flow.AccountPublicKey) + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } } - if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { - r1 = rf(address, keyIndex) + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) } else { r1 = ret.Error(1) } @@ -301,27 +299,29 @@ func (_m *Accounts) GetPublicKey(address flow.Address, keyIndex uint32) (flow.Ac return r0, r1 } -// GetPublicKeyCount provides a mock function with given fields: address -func (_m *Accounts) GetPublicKeyCount(address flow.Address) (uint32, error) { - ret := _m.Called(address) +// GetContract provides a mock function with given fields: contractName, address +func (_m *Accounts) GetContract(contractName string, address flow.Address) ([]byte, error) { + ret := _m.Called(contractName, address) if len(ret) == 0 { - panic("no return value specified for GetPublicKeyCount") + panic("no return value specified for GetContract") } - var r0 uint32 + var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(flow.Address) (uint32, error)); ok { - return rf(address) + if rf, ok := ret.Get(0).(func(string, flow.Address) ([]byte, error)); ok { + return rf(contractName, address) } - if rf, ok := ret.Get(0).(func(flow.Address) uint32); ok { - r0 = rf(address) + if rf, ok := ret.Get(0).(func(string, flow.Address) []byte); ok { + r0 = rf(contractName, address) } else { - r0 = ret.Get(0).(uint32) + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } } - if rf, ok := ret.Get(1).(func(flow.Address) error); ok { - r1 = rf(address) + if rf, ok := ret.Get(1).(func(string, flow.Address) error); ok { + r1 = rf(contractName, address) } else { r1 = ret.Error(1) } @@ -329,24 +329,24 @@ func (_m *Accounts) GetPublicKeyCount(address flow.Address) (uint32, error) { return r0, r1 } -// GetPublicKeys provides a mock function with given fields: address -func (_m *Accounts) GetPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { +// GetContractNames provides a mock function with given fields: address +func (_m *Accounts) GetContractNames(address flow.Address) ([]string, error) { ret := _m.Called(address) if len(ret) == 0 { - panic("no return value specified for GetPublicKeys") + panic("no return value specified for GetContractNames") } - var r0 []flow.AccountPublicKey + var r0 []string var r1 error - if rf, ok := ret.Get(0).(func(flow.Address) ([]flow.AccountPublicKey, error)); ok { + if rf, ok := ret.Get(0).(func(flow.Address) ([]string, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(flow.Address) []flow.AccountPublicKey); ok { + if rf, ok := ret.Get(0).(func(flow.Address) []string); ok { r0 = rf(address) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.AccountPublicKey) + r0 = ret.Get(0).([]string) } } @@ -417,30 +417,12 @@ func (_m *Accounts) GetValue(id flow.RegisterID) ([]byte, error) { return r0, r1 } -// SetContract provides a mock function with given fields: contractName, 
address, contract -func (_m *Accounts) SetContract(contractName string, address flow.Address, contract []byte) error { - ret := _m.Called(contractName, address, contract) - - if len(ret) == 0 { - panic("no return value specified for SetContract") - } - - var r0 error - if rf, ok := ret.Get(0).(func(string, flow.Address, []byte) error); ok { - r0 = rf(contractName, address, contract) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetPublicKey provides a mock function with given fields: address, keyIndex, publicKey -func (_m *Accounts) SetPublicKey(address flow.Address, keyIndex uint32, publicKey flow.AccountPublicKey) ([]byte, error) { +// SetAccountPublicKey provides a mock function with given fields: address, keyIndex, publicKey +func (_m *Accounts) SetAccountPublicKey(address flow.Address, keyIndex uint32, publicKey flow.AccountPublicKey) ([]byte, error) { ret := _m.Called(address, keyIndex, publicKey) if len(ret) == 0 { - panic("no return value specified for SetPublicKey") + panic("no return value specified for SetAccountPublicKey") } var r0 []byte @@ -465,6 +447,24 @@ func (_m *Accounts) SetPublicKey(address flow.Address, keyIndex uint32, publicKe return r0, r1 } +// SetContract provides a mock function with given fields: contractName, address, contract +func (_m *Accounts) SetContract(contractName string, address flow.Address, contract []byte) error { + ret := _m.Called(contractName, address, contract) + + if len(ret) == 0 { + panic("no return value specified for SetContract") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, flow.Address, []byte) error); ok { + r0 = rf(contractName, address, contract) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // SetValue provides a mock function with given fields: id, value func (_m *Accounts) SetValue(id flow.RegisterID, value []byte) error { ret := _m.Called(id, value) diff --git a/fvm/evm/emulator/config.go b/fvm/evm/emulator/config.go index ad6fcccaaf2..baee338825f 100644 --- a/fvm/evm/emulator/config.go +++ b/fvm/evm/emulator/config.go @@ -3,11 +3,11 @@ package emulator import ( "math/big" - gethCommon "github.com/onflow/go-ethereum/common" - gethCore "github.com/onflow/go-ethereum/core" - gethVM "github.com/onflow/go-ethereum/core/vm" - "github.com/onflow/go-ethereum/eth/tracers" - gethParams "github.com/onflow/go-ethereum/params" + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/onflow/flow-go/fvm/evm/types" ) @@ -44,6 +44,10 @@ type Config struct { // BlockTotalGasSoFar captures the total // amount of gas used so far BlockTotalGasUsedSoFar uint64 + // PrecompiledContracts holds the applicable precompiled contracts + // for the current chain rules, as well as any extra precompiled + // contracts, such as Cadence Arch etc + PrecompiledContracts gethVM.PrecompiledContracts } // ChainRules returns the chain rules @@ -133,7 +137,6 @@ func defaultConfig() *Config { GetHash: func(n uint64) gethCommon.Hash { return gethCommon.Hash{} }, - GetPrecompile: gethCore.GetPrecompile, }, PCTracker: NewCallTracker(), } @@ -232,19 +235,14 @@ func WithDirectCallBaseGasUsage(gas uint64) Option { // WithExtraPrecompiledContracts appends the precompiled contract list with extra precompiled contracts func WithExtraPrecompiledContracts(precompiledContracts []types.PrecompiledContract) Option { return func(c *Config) 
*Config { - extraPreCompMap := make(map[gethCommon.Address]gethVM.PrecompiledContract) + activePrecompiledContracts := gethVM.ActivePrecompiledContracts(c.ChainRules()) for _, pc := range precompiledContracts { // wrap pcs for tracking wpc := c.PCTracker.RegisterPrecompiledContract(pc) - extraPreCompMap[pc.Address().ToCommon()] = wpc - } - c.BlockContext.GetPrecompile = func(rules gethParams.Rules, addr gethCommon.Address) (gethVM.PrecompiledContract, bool) { - prec, found := extraPreCompMap[addr] - if found { - return prec, true - } - return gethCore.GetPrecompile(rules, addr) + activePrecompiledContracts[pc.Address().ToCommon()] = wpc } + c.PrecompiledContracts = activePrecompiledContracts + return c } } diff --git a/fvm/evm/emulator/emulator.go b/fvm/evm/emulator/emulator.go index 2918fdd0191..e8acd837af2 100644 --- a/fvm/evm/emulator/emulator.go +++ b/fvm/evm/emulator/emulator.go @@ -4,16 +4,16 @@ import ( "errors" "math/big" + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethTracing "github.com/ethereum/go-ethereum/core/tracing" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" "github.com/onflow/atree" "github.com/onflow/crypto/hash" - gethCommon "github.com/onflow/go-ethereum/common" - gethCore "github.com/onflow/go-ethereum/core" - gethTracing "github.com/onflow/go-ethereum/core/tracing" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethVM "github.com/onflow/go-ethereum/core/vm" - gethCrypto "github.com/onflow/go-ethereum/crypto" - gethParams "github.com/onflow/go-ethereum/params" "github.com/onflow/flow-go/fvm/evm/emulator/state" "github.com/onflow/flow-go/fvm/evm/types" @@ -325,6 +325,10 @@ func (bl *BlockView) newProcedure() (*procedure, error) { cfg.EVMConfig, ) evm.SetTxContext(*cfg.TxContext) + // inject the applicable precompiled contracts for the current + // chain rules, as well as any extra precompiled contracts, + // such as Cadence Arch etc + evm.SetPrecompiles(cfg.PrecompiledContracts) return &procedure{ config: cfg, diff --git a/fvm/evm/emulator/emulator_test.go b/fvm/evm/emulator/emulator_test.go index 5453a13e672..b09b93e5d04 100644 --- a/fvm/evm/emulator/emulator_test.go +++ b/fvm/evm/emulator/emulator_test.go @@ -8,10 +8,10 @@ import ( "strings" "testing" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethVM "github.com/onflow/go-ethereum/core/vm" - gethParams "github.com/onflow/go-ethereum/params" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator" @@ -20,7 +20,7 @@ import ( "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" - _ "github.com/onflow/go-ethereum/eth/tracers/native" // imported so callTracers is registered in init + _ "github.com/ethereum/go-ethereum/eth/tracers/native" // imported so callTracers is registered in init ) var blockNumber = big.NewInt(10) diff --git a/fvm/evm/emulator/signer.go b/fvm/evm/emulator/signer.go index 06573c34056..44b2964f843 100644 --- a/fvm/evm/emulator/signer.go +++ b/fvm/evm/emulator/signer.go @@ -3,7 +3,7 @@ package emulator 
import ( "math/big" - "github.com/onflow/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/types" ) var defaultBlockNumberForEVMRules = big.NewInt(1) // anything bigger than 0 diff --git a/fvm/evm/emulator/state/account.go b/fvm/evm/emulator/state/account.go index f2f90d3f253..47640df1c98 100644 --- a/fvm/evm/emulator/state/account.go +++ b/fvm/evm/emulator/state/account.go @@ -1,10 +1,10 @@ package state import ( + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/rlp" ) // Account holds the metadata of an address and provides (de)serialization functionality diff --git a/fvm/evm/emulator/state/account_test.go b/fvm/evm/emulator/state/account_test.go index 4720b65b8b8..4d8d31ff08d 100644 --- a/fvm/evm/emulator/state/account_test.go +++ b/fvm/evm/emulator/state/account_test.go @@ -3,7 +3,7 @@ package state_test import ( "testing" - "github.com/onflow/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator/state" diff --git a/fvm/evm/emulator/state/base.go b/fvm/evm/emulator/state/base.go index 0f690b7367a..fb666dcc0b5 100644 --- a/fvm/evm/emulator/state/base.go +++ b/fvm/evm/emulator/state/base.go @@ -3,12 +3,12 @@ package state import ( "fmt" + "github.com/ethereum/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" "github.com/onflow/atree" - "github.com/onflow/go-ethereum/common" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/emulator/state/base_test.go b/fvm/evm/emulator/state/base_test.go index f1decb3090a..fec5bc6b082 100644 --- a/fvm/evm/emulator/state/base_test.go +++ b/fvm/evm/emulator/state/base_test.go @@ -3,10 +3,10 @@ package state_test import ( "testing" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator/state" diff --git a/fvm/evm/emulator/state/code.go b/fvm/evm/emulator/state/code.go index d641e43943d..1353cf5a69b 100644 --- a/fvm/evm/emulator/state/code.go +++ b/fvm/evm/emulator/state/code.go @@ -4,8 +4,8 @@ import ( "encoding/binary" "fmt" - gethCommon "github.com/onflow/go-ethereum/common" - "github.com/onflow/go-ethereum/rlp" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" ) // CodeContainer contains codes and keeps diff --git a/fvm/evm/emulator/state/delta.go b/fvm/evm/emulator/state/delta.go index 0304b592f01..6f3838dc7f9 100644 --- a/fvm/evm/emulator/state/delta.go +++ b/fvm/evm/emulator/state/delta.go @@ -3,10 +3,10 @@ package state import ( "fmt" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes 
"github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" "github.com/onflow/flow-go/fvm/evm/types" ) @@ -476,7 +476,15 @@ func (d *DeltaView) SubRefund(amount uint64) error { return nil } -// AddressInAccessList checks if the address is in the access list +// AddressInAccessList checks if the address is in the access list of +// the current view. +// NOTE: Due to resource constraints (such as CPU & memory), and the +// high-frequency usage of this function from EVM, we do not look up +// the parents until the root view or until we find a view that has +// the address in its local access list. +// As an optimization, the `StateDB.AddressInAccessList` is responsible +// for optimally traversing the views, to check if the address is in +// the access list. func (d *DeltaView) AddressInAccessList(addr gethCommon.Address) bool { if d.accessListAddresses != nil { _, addressFound := d.accessListAddresses[addr] @@ -484,7 +492,7 @@ func (d *DeltaView) AddressInAccessList(addr gethCommon.Address) bool { return true } } - return d.parent.AddressInAccessList(addr) + return false } // AddAddressToAccessList adds an address to the access list @@ -498,7 +506,15 @@ func (d *DeltaView) AddAddressToAccessList(addr gethCommon.Address) bool { return !addrPresent } -// SlotInAccessList checks if the slot is in the access list +// SlotInAccessList checks if the slot is in the access list of the +// current view. +// NOTE: Due to resource constraints (such as CPU & memory), and the +// high-frequency usage of this function from EVM, we do not look up +// the parents until the root view or until we find a view that has +// the slot in its local access list. +// As an optimization, the `StateDB.SlotInAccessList` is responsible +// for optimally traversing the views, to check if the slot is in +// the access list. 
func (d *DeltaView) SlotInAccessList(sk types.SlotAddress) (addressOk bool, slotOk bool) { addressFound := d.AddressInAccessList(sk.Address) if d.accessListSlots != nil { @@ -507,8 +523,7 @@ func (d *DeltaView) SlotInAccessList(sk types.SlotAddress) (addressOk bool, slot return addressFound, true } } - _, slotFound := d.parent.SlotInAccessList(sk) - return addressFound, slotFound + return addressFound, false } // AddSlotToAccessList adds a slot to the access list diff --git a/fvm/evm/emulator/state/delta_test.go b/fvm/evm/emulator/state/delta_test.go index a0763e91924..9ca089888af 100644 --- a/fvm/evm/emulator/state/delta_test.go +++ b/fvm/evm/emulator/state/delta_test.go @@ -4,10 +4,10 @@ import ( "fmt" "testing" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator/state" @@ -496,7 +496,7 @@ func TestDeltaView(t *testing.T) { }) // check address through parent - require.True(t, view.AddressInAccessList(addr1)) + require.False(t, view.AddressInAccessList(addr1)) // add addr 2 to the list require.False(t, view.AddressInAccessList(addr2)) @@ -511,7 +511,7 @@ func TestDeltaView(t *testing.T) { // check slot through parent addrFound, slotFound := view.SlotInAccessList(slot1) require.False(t, addrFound) - require.True(t, slotFound) + require.False(t, slotFound) // add slot 2 to the list addrFound, slotFound = view.SlotInAccessList(slot2) diff --git a/fvm/evm/emulator/state/exporter.go b/fvm/evm/emulator/state/exporter.go index f1cb9bcfa10..32005fa53a9 100644 --- a/fvm/evm/emulator/state/exporter.go +++ b/fvm/evm/emulator/state/exporter.go @@ -7,8 +7,8 @@ import ( "os" "path/filepath" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/atree" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/emulator/state/extract.go b/fvm/evm/emulator/state/extract.go index e0bb30d82aa..925c713ef6d 100644 --- a/fvm/evm/emulator/state/extract.go +++ b/fvm/evm/emulator/state/extract.go @@ -1,7 +1,7 @@ package state import ( - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/emulator/state/importer.go b/fvm/evm/emulator/state/importer.go index 132846512f4..5eae814086d 100644 --- a/fvm/evm/emulator/state/importer.go +++ b/fvm/evm/emulator/state/importer.go @@ -8,7 +8,7 @@ import ( "path/filepath" "strings" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" ) diff --git a/fvm/evm/emulator/state/stateDB.go b/fvm/evm/emulator/state/stateDB.go index 13bba412f1b..b0429ad7b7c 100644 --- a/fvm/evm/emulator/state/stateDB.go +++ b/fvm/evm/emulator/state/stateDB.go @@ -6,16 +6,16 @@ import ( "fmt" "sort" + gethCommon "github.com/ethereum/go-ethereum/common" + gethState "github.com/ethereum/go-ethereum/core/state" + gethStateless "github.com/ethereum/go-ethereum/core/stateless" + gethTracing "github.com/ethereum/go-ethereum/core/tracing" + gethTypes 
"github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + gethUtils "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" "github.com/onflow/atree" "github.com/onflow/crypto/hash" - gethCommon "github.com/onflow/go-ethereum/common" - gethState "github.com/onflow/go-ethereum/core/state" - gethStateless "github.com/onflow/go-ethereum/core/stateless" - gethTracing "github.com/onflow/go-ethereum/core/tracing" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethParams "github.com/onflow/go-ethereum/params" - gethUtils "github.com/onflow/go-ethereum/trie/utils" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" @@ -256,6 +256,17 @@ func (db *StateDB) GetCommittedState(addr gethCommon.Address, key gethCommon.Has return value } +// GetStateAndCommittedState returns the current value and the original value. +func (db *StateDB) GetStateAndCommittedState( + addr gethCommon.Address, + key gethCommon.Hash, +) (gethCommon.Hash, gethCommon.Hash) { + origin := db.GetCommittedState(addr, key) + value := db.GetState(addr, key) + + return value, origin +} + // GetState returns the value for the given storage slot func (db *StateDB) GetState(addr gethCommon.Address, key gethCommon.Hash) gethCommon.Hash { state, err := db.latestView().GetState(types.SlotAddress{Address: addr, Key: key}) @@ -273,7 +284,7 @@ func (db *StateDB) GetState(addr gethCommon.Address, key gethCommon.Hash) gethCo // // This behavior is ok for this version of EVM as the only // use case in the EVM right now is here -// https://github.com/onflow/go-ethereum/blob/37590b2c5579c36d846c788c70861685b0ea240e/core/vm/evm.go#L480 +// https://github.com/ethereum/go-ethereum/blob/37590b2c5579c36d846c788c70861685b0ea240e/core/vm/evm.go#L480 // where the value that is returned is compared to empty values to make sure the storage is empty // This endpoint is added mostly to prevent the case that an smart contract is self-destructed // and a later transaction tries to deploy a contract to the same address. @@ -308,12 +319,50 @@ func (db *StateDB) SetTransientState(addr gethCommon.Address, key, value gethCom // AddressInAccessList checks if an address is in the access list func (db *StateDB) AddressInAccessList(addr gethCommon.Address) bool { - return db.latestView().AddressInAccessList(addr) + // For each static call / call / delegate call, the EVM will create + // a snapshot, so that it can revert to it in case of execution errors, + // such as out of gas etc, using `Snapshot` & `RevertToSnapshot`. + // This can create a long list of views, in the order of 4K for certain + // large transactions. To avoid performance issues with DeltaView checking parents, + // which causes deep stacks and function call overhead, we use a plain for-loop instead. + // We iterate through the views in ascending order (from lowest to highest) as an optimization. + // Since addresses are typically added to the AccessList early during transaction execution, + // this allows us to return early when the needed addresses are found in the initial views. 
+ end := len(db.views) + for i := range end { + view := db.views[i] + if view.AddressInAccessList(addr) { + return true + } + } + + return false } // SlotInAccessList checks if the given (address,slot) is in the access list func (db *StateDB) SlotInAccessList(addr gethCommon.Address, key gethCommon.Hash) (addressOk bool, slotOk bool) { - return db.latestView().SlotInAccessList(types.SlotAddress{Address: addr, Key: key}) + slotKey := types.SlotAddress{Address: addr, Key: key} + + // For each static call / call / delegate call, the EVM will create + // a snapshot, so that it can revert to it in case of execution errors, + // such as out of gas etc, using `Snapshot` & `RevertToSnapshot`. + // This can create a long list of views, in the order of 4K for certain + // large transactions. To avoid performance issues with DeltaView checking parents, + // which causes deep stacks and function call overhead, we use a plain for-loop instead. + // We iterate through the views in ascending order (from lowest to highest) as an optimization. + // Since slots are typically added to the AccessList early during transaction execution, + // this allows us to return early when the needed slots are found in the initial views. + addressFound := false + end := len(db.views) + for i := range end { + view := db.views[i] + addressFound, slotFound := view.SlotInAccessList(slotKey) + if slotFound { + return addressFound, true + } + } + + return addressFound, false } // AddAddressToAccessList adds the given address to the access list. diff --git a/fvm/evm/emulator/state/stateDB_test.go b/fvm/evm/emulator/state/stateDB_test.go index 71ae80da776..345d92f5dd4 100644 --- a/fvm/evm/emulator/state/stateDB_test.go +++ b/fvm/evm/emulator/state/stateDB_test.go @@ -4,12 +4,12 @@ import ( "fmt" "testing" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTracing "github.com/ethereum/go-ethereum/core/tracing" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" "github.com/onflow/atree" - gethCommon "github.com/onflow/go-ethereum/common" - gethTracing "github.com/onflow/go-ethereum/core/tracing" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethParams "github.com/onflow/go-ethereum/params" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator/state" @@ -88,6 +88,10 @@ func TestStateDB(t *testing.T) { ret = db.GetCommittedState(addr1, key1) require.Equal(t, gethCommon.Hash{}, ret) + currentState, originalState := db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, gethCommon.Hash{}, originalState) + commit, err := db.Commit(true) require.NoError(t, err) require.NotEmpty(t, commit) @@ -95,6 +99,10 @@ func TestStateDB(t *testing.T) { ret = db.GetCommittedState(addr1, key1) require.Equal(t, value1, ret) + currentState, originalState = db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, value1, originalState) + // create a new db db, err = state.NewStateDB(ledger, rootAddr) require.NoError(t, err) @@ -106,6 +114,10 @@ func TestStateDB(t *testing.T) { val := db.GetState(addr1, key1) require.NoError(t, db.Error()) require.Equal(t, value1, val) + + currentState, originalState = db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, value1, originalState) }) t.Run("test snapshot and revert functionality", func(t *testing.T) { diff --git 
a/fvm/evm/emulator/state/state_growth_test.go b/fvm/evm/emulator/state/state_growth_test.go index 2ba0b12c2c3..a5ffbdea21e 100644 --- a/fvm/evm/emulator/state/state_growth_test.go +++ b/fvm/evm/emulator/state/state_growth_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/holiman/uint256" - "github.com/onflow/go-ethereum/common" - "github.com/onflow/go-ethereum/core/tracing" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator/state" diff --git a/fvm/evm/emulator/state/updateCommitter.go b/fvm/evm/emulator/state/updateCommitter.go index e2c3f331c6e..389c5e6e001 100644 --- a/fvm/evm/emulator/state/updateCommitter.go +++ b/fvm/evm/emulator/state/updateCommitter.go @@ -3,9 +3,9 @@ package state import ( "encoding/binary" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" "github.com/onflow/crypto/hash" - gethCommon "github.com/onflow/go-ethereum/common" ) type OpCode byte diff --git a/fvm/evm/events/events.go b/fvm/evm/events/events.go index abca975d725..b9b2c288b0a 100644 --- a/fvm/evm/events/events.go +++ b/fvm/evm/events/events.go @@ -3,9 +3,9 @@ package events import ( "fmt" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/stdlib" "github.com/onflow/flow-go/fvm/evm/types" diff --git a/fvm/evm/events/events_test.go b/fvm/evm/events/events_test.go index 50fd5ba15be..cc36d30e35f 100644 --- a/fvm/evm/events/events_test.go +++ b/fvm/evm/events/events_test.go @@ -9,13 +9,12 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/go-ethereum/core/vm" - + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" cdcCommon "github.com/onflow/cadence/common" "github.com/onflow/cadence/encoding/ccf" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/fvm/evm/events/utils.go b/fvm/evm/events/utils.go index 1fe164d6b54..c03fc240cae 100644 --- a/fvm/evm/events/utils.go +++ b/fvm/evm/events/utils.go @@ -1,8 +1,8 @@ package events import ( + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" ) diff --git a/fvm/evm/events/utils_test.go b/fvm/evm/events/utils_test.go index 6d59c18d4ce..f306b6cb5e4 100644 --- a/fvm/evm/events/utils_test.go +++ b/fvm/evm/events/utils_test.go @@ -3,8 +3,8 @@ package events import ( "testing" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/stretchr/testify/require" ) diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index a347f4788ff..0a18d71cf0d 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -7,10 +7,10 @@ import ( "math/big" "testing" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/onflow/cadence/encoding/ccf" - gethTypes 
"github.com/onflow/go-ethereum/core/types" - gethParams "github.com/onflow/go-ethereum/params" - "github.com/onflow/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/onflow/cadence" diff --git a/fvm/evm/handler/addressAllocator_test.go b/fvm/evm/handler/addressAllocator_test.go index df6605f9658..794a4c9342e 100644 --- a/fvm/evm/handler/addressAllocator_test.go +++ b/fvm/evm/handler/addressAllocator_test.go @@ -3,7 +3,7 @@ package handler_test import ( "testing" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/handler" diff --git a/fvm/evm/handler/blockHashList.go b/fvm/evm/handler/blockHashList.go index 0db2aff73f9..635e1b3fc82 100644 --- a/fvm/evm/handler/blockHashList.go +++ b/fvm/evm/handler/blockHashList.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/handler/blockHashList_test.go b/fvm/evm/handler/blockHashList_test.go index 75a7f682f3a..ebf1b21e1c8 100644 --- a/fvm/evm/handler/blockHashList_test.go +++ b/fvm/evm/handler/blockHashList_test.go @@ -3,7 +3,7 @@ package handler_test import ( "testing" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/handler" diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index a8c5975f193..3e1f2581c02 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/handler/blockstore_test.go b/fvm/evm/handler/blockstore_test.go index b98a86eabcc..c2e40135949 100644 --- a/fvm/evm/handler/blockstore_test.go +++ b/fvm/evm/handler/blockstore_test.go @@ -4,7 +4,7 @@ import ( "math/big" "testing" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/handler" diff --git a/fvm/evm/handler/handler.go b/fvm/evm/handler/handler.go index 8efd39d4555..04a498cef2b 100644 --- a/fvm/evm/handler/handler.go +++ b/fvm/evm/handler/handler.go @@ -4,9 +4,9 @@ import ( "fmt" "math/big" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/onflow/cadence/common" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" "go.opentelemetry.io/otel/attribute" "github.com/onflow/flow-go/fvm/environment" diff --git a/fvm/evm/handler/handler_test.go b/fvm/evm/handler/handler_test.go index 3db635b5fa8..8dabc3a1646 100644 --- a/fvm/evm/handler/handler_test.go +++ b/fvm/evm/handler/handler_test.go @@ -6,13 +6,13 @@ import ( "math/big" "testing" + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/onflow/cadence/common" - gethCommon 
"github.com/onflow/go-ethereum/common" - gethCore "github.com/onflow/go-ethereum/core" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethVM "github.com/onflow/go-ethereum/core/vm" - gethParams "github.com/onflow/go-ethereum/params" - "github.com/onflow/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/fvm/evm/impl/abi.go b/fvm/evm/impl/abi.go index 106a6c539ad..3fb0e794dbc 100644 --- a/fvm/evm/impl/abi.go +++ b/fvm/evm/impl/abi.go @@ -6,12 +6,12 @@ import ( "reflect" "strings" + gethABI "github.com/ethereum/go-ethereum/accounts/abi" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence/common" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/sema" - gethABI "github.com/onflow/go-ethereum/accounts/abi" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm/stdlib" diff --git a/fvm/evm/impl/impl.go b/fvm/evm/impl/impl.go index 07f080344bd..37f772fb9af 100644 --- a/fvm/evm/impl/impl.go +++ b/fvm/evm/impl/impl.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" - gethTypes "github.com/onflow/go-ethereum/core/types" + gethTypes "github.com/ethereum/go-ethereum/core/types" ) var internalEVMContractStaticType = interpreter.ConvertSemaCompositeTypeToStaticCompositeType( diff --git a/fvm/evm/offchain/blocks/block_context.go b/fvm/evm/offchain/blocks/block_context.go index ecbc8813c76..680a0e04e34 100644 --- a/fvm/evm/offchain/blocks/block_context.go +++ b/fvm/evm/offchain/blocks/block_context.go @@ -1,8 +1,8 @@ package blocks import ( - gethCommon "github.com/onflow/go-ethereum/common" - "github.com/onflow/go-ethereum/eth/tracers" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" diff --git a/fvm/evm/offchain/blocks/blocks.go b/fvm/evm/offchain/blocks/blocks.go index 0d4808c8154..35b8c39638f 100644 --- a/fvm/evm/offchain/blocks/blocks.go +++ b/fvm/evm/offchain/blocks/blocks.go @@ -3,7 +3,7 @@ package blocks import ( "fmt" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/fvm/evm/types" diff --git a/fvm/evm/offchain/blocks/meta.go b/fvm/evm/offchain/blocks/meta.go index c47ed71fdbc..9af31333a17 100644 --- a/fvm/evm/offchain/blocks/meta.go +++ b/fvm/evm/offchain/blocks/meta.go @@ -4,7 +4,7 @@ import ( "encoding/binary" "fmt" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" ) const ( diff --git a/fvm/evm/offchain/query/view.go b/fvm/evm/offchain/query/view.go index bb1647ec657..cf493ca1b03 100644 --- a/fvm/evm/offchain/query/view.go +++ b/fvm/evm/offchain/query/view.go @@ -5,11 +5,11 @@ import ( "fmt" "math/big" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + gethTracers "github.com/ethereum/go-ethereum/eth/tracers" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" - gethTracers "github.com/onflow/go-ethereum/eth/tracers" 
"github.com/onflow/flow-go/fvm/evm/emulator" "github.com/onflow/flow-go/fvm/evm/emulator/state" diff --git a/fvm/evm/offchain/sync/replay.go b/fvm/evm/offchain/sync/replay.go index 6c5d66445ce..a9fe6b2f955 100644 --- a/fvm/evm/offchain/sync/replay.go +++ b/fvm/evm/offchain/sync/replay.go @@ -4,10 +4,10 @@ import ( "bytes" "fmt" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethTracer "github.com/ethereum/go-ethereum/eth/tracers" + gethTrie "github.com/ethereum/go-ethereum/trie" "github.com/onflow/atree" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethTracer "github.com/onflow/go-ethereum/eth/tracers" - gethTrie "github.com/onflow/go-ethereum/trie" "github.com/onflow/flow-go/fvm/evm/emulator" "github.com/onflow/flow-go/fvm/evm/events" diff --git a/fvm/evm/offchain/sync/replayer.go b/fvm/evm/offchain/sync/replayer.go index 96df01d58a0..e7d33234739 100644 --- a/fvm/evm/offchain/sync/replayer.go +++ b/fvm/evm/offchain/sync/replayer.go @@ -1,7 +1,7 @@ package sync import ( - gethTracers "github.com/onflow/go-ethereum/eth/tracers" + gethTracers "github.com/ethereum/go-ethereum/eth/tracers" "github.com/rs/zerolog" "github.com/onflow/flow-go/fvm/evm/events" diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index 06262b5811e..6144c3e3ee8 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -5,7 +5,7 @@ import ( "math/big" "testing" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/rs/zerolog" "github.com/stretchr/testify/require" diff --git a/fvm/evm/precompiles/abi.go b/fvm/evm/precompiles/abi.go index 03f13ca4d49..3d805d9475f 100644 --- a/fvm/evm/precompiles/abi.go +++ b/fvm/evm/precompiles/abi.go @@ -5,7 +5,7 @@ import ( "errors" "math/big" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" ) // This package provides fast and efficient diff --git a/fvm/evm/precompiles/abi_test.go b/fvm/evm/precompiles/abi_test.go index 6ec9877a3d1..f77804017ee 100644 --- a/fvm/evm/precompiles/abi_test.go +++ b/fvm/evm/precompiles/abi_test.go @@ -5,7 +5,7 @@ import ( "math/big" "testing" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/precompiles" diff --git a/fvm/evm/precompiles/selector.go b/fvm/evm/precompiles/selector.go index 1495ba56028..a62c8f5b9ac 100644 --- a/fvm/evm/precompiles/selector.go +++ b/fvm/evm/precompiles/selector.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - gethCrypto "github.com/onflow/go-ethereum/crypto" + gethCrypto "github.com/ethereum/go-ethereum/crypto" ) const FunctionSelectorLength = 4 diff --git a/fvm/evm/precompiles/selector_test.go b/fvm/evm/precompiles/selector_test.go index eb41203ef86..d6f36b9fffe 100644 --- a/fvm/evm/precompiles/selector_test.go +++ b/fvm/evm/precompiles/selector_test.go @@ -3,7 +3,7 @@ package precompiles_test import ( "testing" - gethCrypto "github.com/onflow/go-ethereum/crypto" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/precompiles" diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index 8f21c3544a6..66eb01b40a7 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -3,21 +3,34 @@ import "NonFungibleToken" import "FungibleToken" import "FlowToken" -access(all) 
-contract EVM {
+/*
-    // Entitlements enabling finer-grained access control on a CadenceOwnedAccount
-    access(all) entitlement Validate
-    access(all) entitlement Withdraw
-    access(all) entitlement Call
-    access(all) entitlement Deploy
-    access(all) entitlement Owner
-    access(all) entitlement Bridge
+    The Flow EVM contract defines important types and functionality
+    to allow Cadence code and Flow SDKs to interface
+    with the Ethereum Virtual Machine environment on Flow.
+
+    The EVM contract emits events when relevant actions happen in Flow EVM
+    such as creating new blocks, executing transactions, and bridging FLOW.
+
+    This contract also defines Cadence-Owned Account functionality,
+    which is currently the only way for Cadence code to interact with Flow EVM.
+
+    Additionally, functionality is provided for common EVM types
+    such as addresses, balances, ABIs, transaction results, and more.
+
+    The EVM contract is deployed to the Flow Service Account on every network
+    and much of its functionality is directly connected to the protocol software
+    to allow interaction with the EVM.
+
+    See additional EVM documentation here: https://developers.flow.com/evm/about
+
+*/
+
+access(all) contract EVM {
     /// Block executed event is emitted when a new block is created,
     /// which always happens when a transaction is executed.
-    access(all)
-    event BlockExecuted(
+    access(all) event BlockExecuted (
         // height or number of the block
         height: UInt64,
         // hash of the block
@@ -40,8 +53,7 @@ contract EVM {
     /// Transaction executed event is emitted every time a transaction
     /// is executed by the EVM (even if failed).
-    access(all)
-    event TransactionExecuted(
+    access(all) event TransactionExecuted (
         // hash of the transaction
         hash: [UInt8; 32],
         // index of the transaction in a block
@@ -79,17 +91,13 @@ contract EVM {
         stateUpdateChecksum: [UInt8; 4]
     )
-    access(all)
-    event CadenceOwnedAccountCreated(address: String)
-
     /// FLOWTokensDeposited is emitted when FLOW tokens is bridged
     /// into the EVM environment. Note that this event is not emitted
     /// for transfer of flow tokens between two EVM addresses.
     /// Similar to the FungibleToken.Deposited event
     /// this event includes a depositedUUID that captures the
     /// uuid of the source vault.
-    access(all)
-    event FLOWTokensDeposited(
+    access(all) event FLOWTokensDeposited (
         address: String,
         amount: UFix64,
         depositedUUID: UInt64,
@@ -102,8 +110,7 @@ contract EVM {
     /// similar to the FungibleToken.Withdrawn events
     /// this event includes a withdrawnUUID that captures the
     /// uuid of the returning vault.
-    access(all)
-    event FLOWTokensWithdrawn(
+    access(all) event FLOWTokensWithdrawn (
         address: String,
         amount: UFix64,
         withdrawnUUID: UInt64,
@@ -113,8 +120,7 @@ contract EVM {
     /// BridgeAccessorUpdated is emitted when the BridgeAccessor Capability
     /// is updated in the stored BridgeRouter along with identifying
     /// information about both.
-    access(all)
-    event BridgeAccessorUpdated(
+    access(all) event BridgeAccessorUpdated (
         routerType: Type,
         routerUUID: UInt64,
         routerAddress: Address,
@@ -123,13 +129,35 @@ contract EVM {
         accessorType: Type,
         accessorUUID: UInt64,
         accessorAddress: Address
     )
-    /// EVMAddress is an EVM-compatible address
+    /// Block returns information about the latest executed block.
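+    /// (Illustrative usage, not part of this change: Cadence code can read the
+    /// latest block via `EVM.getLatestBlock()`, which returns this struct, e.g.
+    /// to inspect its `height` or `timestamp` fields.)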
+ access(all) struct EVMBlock { + access(all) let height: UInt64 + + access(all) let hash: String + + access(all) let totalSupply: Int + + access(all) let timestamp: UInt64 + + init(height: UInt64, hash: String, totalSupply: Int, timestamp: UInt64) { + self.height = height + self.hash = hash + self.totalSupply = totalSupply + self.timestamp = timestamp + } + } + + /// Returns the latest executed block. access(all) - struct EVMAddress { + fun getLatestBlock(): EVMBlock { + return InternalEVM.getLatestBlock() as! EVMBlock + } + + /// EVMAddress is an EVM-compatible address + access(all) struct EVMAddress { /// Bytes of the address - access(all) - let bytes: [UInt8; 20] + access(all) let bytes: [UInt8; 20] /// Constructs a new EVM address from the given byte representation view init(bytes: [UInt8; 20]) { @@ -174,7 +202,8 @@ contract EVM { fun deposit(from: @FlowToken.Vault) { let amount = from.balance if amount == 0.0 { - panic("calling deposit function with an empty vault is not allowed") + destroy from + return } let depositedUUID = from.uuid InternalEVM.deposit( @@ -203,14 +232,26 @@ contract EVM { } } + /// Converts a hex string to an EVM address if the string is a valid hex string + /// Future implementations should pass data to InternalEVM for native deserialization + access(all) + fun addressFromString(_ asHex: String): EVMAddress { + pre { + asHex.length == 40 || asHex.length == 42: + "EVM.addressFromString(): Invalid hex string length for an EVM address. The provided string is \(asHex.length), but the length must be 40 or 42." + } + // Strip the 0x prefix if it exists + var withoutPrefix = (asHex[1] == "x" ? asHex.slice(from: 2, upTo: asHex.length) : asHex).toLower() + let bytes = withoutPrefix.decodeHex().toConstantSized<[UInt8; 20]>()! + return EVMAddress(bytes: bytes) + } + /// EVMBytes is a type wrapper used for ABI encoding/decoding into /// Solidity `bytes` type - access(all) - struct EVMBytes { + access(all) struct EVMBytes { /// Byte array representing the `bytes` value - access(all) - let value: [UInt8] + access(all) let value: [UInt8] view init(value: [UInt8]) { self.value = value @@ -219,12 +260,10 @@ contract EVM { /// EVMBytes4 is a type wrapper used for ABI encoding/decoding into /// Solidity `bytes4` type - access(all) - struct EVMBytes4 { + access(all) struct EVMBytes4 { /// Byte array representing the `bytes4` value - access(all) - let value: [UInt8; 4] + access(all) let value: [UInt8; 4] view init(value: [UInt8; 4]) { self.value = value @@ -233,40 +272,23 @@ contract EVM { /// EVMBytes32 is a type wrapper used for ABI encoding/decoding into /// Solidity `bytes32` type - access(all) - struct EVMBytes32 { + access(all) struct EVMBytes32 { /// Byte array representing the `bytes32` value - access(all) - let value: [UInt8; 32] + access(all) let value: [UInt8; 32] view init(value: [UInt8; 32]) { self.value = value } } - /// Converts a hex string to an EVM address if the string is a valid hex string - /// Future implementations should pass data to InternalEVM for native deserialization - access(all) - fun addressFromString(_ asHex: String): EVMAddress { - pre { - asHex.length == 40 || asHex.length == 42: "Invalid hex string length for an EVM address" - } - // Strip the 0x prefix if it exists - var withoutPrefix = (asHex[1] == "x" ? asHex.slice(from: 2, upTo: asHex.length) : asHex).toLower() - let bytes = withoutPrefix.decodeHex().toConstantSized<[UInt8; 20]>()! 
- return EVMAddress(bytes: bytes) - } - - access(all) - struct Balance { + access(all) struct Balance { /// The balance in atto-FLOW /// Atto-FLOW is the smallest denomination of FLOW (1e18 FLOW) /// that is used to store account balances inside EVM /// similar to the way WEI is used to store ETH divisible to 18 decimal places. - access(all) - var attoflow: UInt + access(all) var attoflow: UInt /// Constructs a new balance access(all) @@ -284,7 +306,7 @@ contract EVM { /// Casts the balance to a UFix64 (rounding down) /// Warning! casting a balance to a UFix64 which supports a lower level of precision /// (8 decimal points in compare to 18) might result in rounding down error. - /// Use the toAttoFlow function if you care need more accuracy. + /// Use the inAttoFlow function if you need more accuracy. access(all) view fun inFLOW(): UFix64 { return InternalEVM.castToFLOW(balance: self.attoflow) @@ -305,48 +327,44 @@ contract EVM { /// reports the status of evm execution. access(all) enum Status: UInt8 { - /// is (rarely) returned when status is unknown + /// Returned (rarely) when status is unknown /// and something has gone very wrong. access(all) case unknown - /// is returned when execution of an evm transaction/call + /// Returned when execution of an evm transaction/call /// has failed at the validation step (e.g. nonce mismatch). /// An invalid transaction/call is rejected to be executed /// or be included in a block. access(all) case invalid - /// is returned when execution of an evm transaction/call - /// has been successful but the vm has reported an error as + /// Returned when execution of an evm transaction/call + /// has been successful but the vm has reported an error in /// the outcome of execution (e.g. running out of gas). /// A failed tx/call is included in a block. /// Note that resubmission of a failed transaction would /// result in invalid status in the second attempt, given - /// the nonce would be come invalid. + /// the nonce would become invalid. access(all) case failed - /// is returned when execution of an evm transaction/call + /// Returned when execution of an evm transaction/call /// has been successful and no error is reported by the vm. access(all) case successful } - /// reports the outcome of evm transaction/call execution attempt + /// Reports the outcome of an evm transaction/call execution attempt access(all) struct Result { /// status of the execution - access(all) - let status: Status + access(all) let status: Status /// error code (error code zero means no error) - access(all) - let errorCode: UInt64 + access(all) let errorCode: UInt64 /// error message - access(all) - let errorMessage: String + access(all) let errorMessage: String /// returns the amount of gas metered during /// evm execution - access(all) - let gasUsed: UInt64 + access(all) let gasUsed: UInt64 /// returns the data that is returned from /// the evm for the call. For coa.deploy @@ -354,14 +372,12 @@ contract EVM { /// the address provided in the contractAddress field. /// in case of revert, the smart contract custom error message /// is also returned here (see EIP-140 for more details). - access(all) - let data: [UInt8] + access(all) let data: [UInt8] /// returns the newly deployed contract address /// if the transaction caused such a deployment /// otherwise the value is nil. - access(all) - let deployedContract: EVMAddress? + access(all) let deployedContract: EVMAddress? 
     init(
         status: Status,
@@ -385,18 +401,61 @@
         }
     }
-    access(all)
-    resource interface Addressable {
-        /// The EVM address
+    /*
+        Cadence-Owned Accounts (COA)
+        A COA is a natively supported EVM smart contract wallet type
+        that allows a Cadence resource to own and control an EVM address.
+        This native wallet provides the primitives needed to bridge
+        or control assets across Flow EVM and Cadence.
+        From the EVM perspective, COAs are smart contract wallets
+        that accept native token transfers and support several ERCs
+        including ERC-165, ERC-721, ERC-777, ERC-1155, ERC-1271.
+
+        COAs are not controlled by a key.
+        Instead, every COA account has a unique resource accessible
+        on the Cadence side, and anyone who owns that resource submits transactions
+        on behalf of this address. These direct transactions have COA’s EVM address
+        as the tx.origin and a new EVM transaction type (TxType = 0xff)
+        is used to differentiate these transactions from other types
+        of EVM transactions (e.g., DynamicFeeTxType (0x02)).
+
+        Because of this, users are never able to access a key for their account,
+        meaning that they cannot control their COA's address on other EVM blockchains.
+    */
+
+    /* Entitlements enabling finer-grained access control on a CadenceOwnedAccount */
+
+    /// Allows validating ownership of a COA
+    access(all) entitlement Validate
+
+    /// Allows withdrawing FLOW from the COA back to Cadence
+    access(all) entitlement Withdraw
+
+    /// Allows sending Call transactions from the COA
+    access(all) entitlement Call
+
+    /// Allows sending deploy contract transactions from the COA
+    access(all) entitlement Deploy
+
+    /// Allows access to all the privileged functionality on a COA
+    access(all) entitlement Owner
+
+    /// Allows access to all bridging functionality for COAs
+    access(all) entitlement Bridge
+
+    /// Event that indicates when a new COA is created
+    access(all) event CadenceOwnedAccountCreated(address: String, uuid: UInt64)
+
+    /// Interface for types that have an associated EVM address
+    access(all) resource interface Addressable {
+        /// Gets the EVM address
         access(all)
         view fun address(): EVMAddress
     }
-    access(all)
-    resource CadenceOwnedAccount: Addressable {
+    access(all) resource CadenceOwnedAccount: Addressable {
-        access(self)
-        var addressBytes: [UInt8; 20]
+        access(self) var addressBytes: [UInt8; 20]
         init() {
             // address is initially set to zero
@@ -406,36 +465,49 @@
             self.addressBytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
         }
+        /// Sets the EVM address for the COA. Only callable once on initial creation.
+        ///
+        /// @param addressBytes: The 20 byte EVM address
        access(contract)
         fun initAddress(addressBytes: [UInt8; 20]) {
-            // only allow set address for the first time
-            // check address is empty
-            for item in self.addressBytes {
-                assert(item == 0, message: "address byte is not empty")
+            // only allow set address for the first time
+            // check address is empty
+            pre {
+                self.addressBytes == [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]:
+                    "EVM.CadenceOwnedAccount.initAddress(): Cannot initialize the address bytes if it has already been set!"
             }
             self.addressBytes = addressBytes
         }
-        /// The EVM address of the cadence owned account
+        /// Gets the EVM address of the cadence owned account
+        ///
         access(all)
         view fun address(): EVMAddress {
             // Always create a new EVMAddress instance
             return EVMAddress(bytes: self.addressBytes)
         }
-        /// Get balance of the cadence owned account
+        /// Gets the balance of the cadence owned account
+        ///
         access(all)
         view fun balance(): Balance {
             return self.address().balance()
         }
         /// Deposits the given vault into the cadence owned account's balance
+        ///
+        /// @param from: The FlowToken Vault to deposit to this cadence owned account
         access(all)
         fun deposit(from: @FlowToken.Vault) {
             self.address().deposit(from: <-from)
         }
-        /// The EVM address of the cadence owned account behind an entitlement, acting as proof of access
+        /// Gets the EVM address of the cadence owned account behind an entitlement,
+        /// acting as proof of access
         access(Owner | Validate)
         view fun protectedAddress(): EVMAddress {
             return self.address()
@@ -446,10 +518,14 @@
         /// given that Flow Token Vaults use UFix64s to store balances.
         /// If the given balance conversion to UFix64 results in
         /// rounding error, this function would fail.
+        ///
+        /// @param balance: The EVM balance to withdraw
+        ///
+        /// @return A FlowToken Vault with the requested balance
         access(Owner | Withdraw)
         fun withdraw(balance: Balance): @FlowToken.Vault {
             if balance.isZero() {
-                panic("calling withdraw function with zero balance is not allowed")
+                return <-FlowToken.createEmptyVault(vaultType: Type<@FlowToken.Vault>())
             }
             let vault <- InternalEVM.withdraw(
                 from: self.addressBytes,
@@ -467,6 +543,12 @@
         /// Deploys a contract to the EVM environment.
         /// Returns the result which contains address of
         /// the newly deployed contract
+        ///
+        /// @param code: The bytecode of the Solidity contract
+        /// @param gasLimit: The EVM Gas limit for the deployment transaction
+        /// @param value: The value, as an EVM.Balance object, to send with the deployment
+        ///
+        /// @return The EVM transaction result
         access(Owner | Deploy)
         fun deploy(
             code: [UInt8],
@@ -518,8 +600,12 @@
             ) as! Result
         }
-        /// Bridges the given NFT to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill
-        /// the bridge request
+        /// Bridges the given NFT to the EVM environment, requiring a Provider
+        /// from which to withdraw a fee to fulfill the bridge request
+        ///
+        /// @param nft: The NFT to bridge to the COA's address in Flow EVM
+        /// @param feeProvider: A Withdraw entitled Provider reference to a FlowToken Vault
+        ///     that contains the fees to be taken to pay for bridging
         access(all)
         fun depositNFT(
             nft: @{NonFungibleToken.NFT},
@@ -528,8 +614,16 @@
             EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider)
         }
-        /// Bridges the given NFT from the EVM environment, requiring a Provider from which to withdraw a fee to fulfill
-        /// the bridge request. Note: the caller should own the requested NFT in EVM
+        /// Bridges the given NFT from the EVM environment, requiring a Provider
+        /// from which to withdraw a fee to fulfill the bridge request.
+ /// Note: the caller has to own the requested NFT in EVM + /// + /// @param type: The Cadence type of the NFT to withdraw + /// @param id: The EVM ERC721 ID of the NFT to withdraw + /// @param feeProvider: A Withdraw entitled Provider reference to a FlowToken Vault + /// that contains the fees to be taken to pay for bridging + /// + /// @return The requested NFT access(Owner | Bridge) fun withdrawNFT( type: Type, @@ -544,8 +638,7 @@ contract EVM { ) } - /// Bridges the given Vault to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill - /// the bridge request + /// Bridges the given Vault to the EVM environment access(all) fun depositTokens( vault: @{FungibleToken.Vault}, @@ -579,12 +672,18 @@ contract EVM { let addr = InternalEVM.createCadenceOwnedAccount(uuid: acc.uuid) acc.initAddress(addressBytes: addr) - emit CadenceOwnedAccountCreated(address: acc.address().toString()) + emit CadenceOwnedAccountCreated(address: acc.address().toString(), uuid: acc.uuid) return <-acc } /// Runs an a RLP-encoded EVM transaction, deducts the gas fees, /// and deposits the gas fees into the provided coinbase address. + /// + /// @param tx: The rlp-encoded transaction to run + /// @param coinbase: The address of entity to receive the transaction fees + /// for relaying the transaction + /// + /// @return: The transaction result access(all) fun run(tx: [UInt8], coinbase: EVMAddress): Result { return InternalEVM.run( @@ -593,8 +692,8 @@ contract EVM { ) as! Result } - /// mustRun runs the transaction using EVM.run yet it - /// rollback if the tx execution status is unknown or invalid. + /// mustRun runs the transaction using EVM.run + /// It will rollback if the tx execution status is unknown or invalid. /// Note that this method does not rollback if transaction /// is executed but an vm error is reported as the outcome /// of the execution (status: failed). @@ -603,7 +702,7 @@ contract EVM { let runResult = self.run(tx: tx, coinbase: coinbase) assert( runResult.status == Status.failed || runResult.status == Status.successful, - message: "tx is not valid for execution" + message: "EVM.mustRun(): The provided transaction is not valid for execution" ) return runResult } @@ -686,7 +785,7 @@ contract EVM { for byte in methodID { if byte != data.removeFirst() { - panic("signature mismatch") + panic("EVM.decodeABIWithSignature(): Cannot decode! The signature does not match the provided data.") } } @@ -694,13 +793,13 @@ contract EVM { } /// ValidationResult returns the result of COA ownership proof validation - access(all) - struct ValidationResult { - access(all) - let isValid: Bool + access(all) struct ValidationResult { - access(all) - let problem: String? + access(all) let isValid: Bool + + /// If there was a problem with validation, this describes + /// what the problem was + access(all) let problem: String? init(isValid: Bool, problem: String?) 
{ self.isValid = isValid @@ -723,7 +822,8 @@ contract EVM { if keyIndices.length != signatures.length { return ValidationResult( isValid: false, - problem: "key indices size doesn't match the signatures" + problem: "EVM.validateCOAOwnershipProof(): Key indices array length" + .concat(" doesn't match the signatures array length!") ) } @@ -734,7 +834,7 @@ contract EVM { let keyList = Crypto.KeyList() var keyListLength = 0 let seenAccountKeyIndices: {Int: Int} = {} - for signatureIndex, signature in signatures{ + for signatureIndex, signature in signatures { // index of the key on the account let accountKeyIndex = Int(keyIndices[signatureIndex]!) // index of the key in the key list @@ -746,7 +846,8 @@ contract EVM { if key.isRevoked { return ValidationResult( isValid: false, - problem: "account key is revoked" + problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership" + .concat(" for Cadence account \(address). The account key at index \(accountKeyIndex) is revoked.") ) } @@ -765,7 +866,8 @@ contract EVM { } else { return ValidationResult( isValid: false, - problem: "invalid key index" + problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership" + .concat(" for Cadence account \(address). The key index \(accountKeyIndex) is invalid.") ) } } else { @@ -790,7 +892,8 @@ contract EVM { if !isValid{ return ValidationResult( isValid: false, - problem: "the given signatures are not valid or provide enough weight" + problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership" + .concat(" for Cadence account \(address). The given signatures are not valid or provide enough weight.") ) } @@ -798,7 +901,8 @@ contract EVM { if coaRef == nil { return ValidationResult( isValid: false, - problem: "could not borrow bridge account's resource" + problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership. " + .concat("Could not borrow the COA resource for account \(address).") ) } @@ -808,7 +912,8 @@ contract EVM { if item != evmAddress[index] { return ValidationResult( isValid: false, - problem: "evm address mismatch" + problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership." + .concat("The provided evm address does not match the account's COA address.") ) } } @@ -819,38 +924,8 @@ contract EVM { ) } - /// Block returns information about the latest executed block. - access(all) - struct EVMBlock { - access(all) - let height: UInt64 - - access(all) - let hash: String - - access(all) - let totalSupply: Int - - access(all) - let timestamp: UInt64 - - init(height: UInt64, hash: String, totalSupply: Int, timestamp: UInt64) { - self.height = height - self.hash = hash - self.totalSupply = totalSupply - self.timestamp = timestamp - } - } - - /// Returns the latest executed block. - access(all) - fun getLatestBlock(): EVMBlock { - return InternalEVM.getLatestBlock() as! 
EVMBlock - } - /// Interface for a resource which acts as an entrypoint to the VM bridge - access(all) - resource interface BridgeAccessor { + access(all) resource interface BridgeAccessor { /// Endpoint enabling the bridging of an NFT to EVM access(Bridge) @@ -887,21 +962,23 @@ contract EVM { ): @{FungibleToken.Vault} } - /// Interface which captures a Capability to the bridge Accessor, saving it within the BridgeRouter resource - access(all) - resource interface BridgeRouter { + /// Interface which captures a Capability to the bridge Accessor, + /// saving it within the BridgeRouter resource + access(all) resource interface BridgeRouter { - /// Returns a reference to the BridgeAccessor designated for internal bridge requests + /// Returns a reference to the BridgeAccessor designated + /// for internal bridge requests access(Bridge) view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} /// Sets the BridgeAccessor Capability in the BridgeRouter access(Bridge) fun setBridgeAccessor(_ accessor: Capability) { pre { - accessor.check(): "Invalid BridgeAccessor Capability provided" + accessor.check(): + "EVM.setBridgeAccessor(): Invalid BridgeAccessor Capability provided" emit BridgeAccessorUpdated( routerType: self.getType(), routerUUID: self.uuid, - routerAddress: self.owner?.address ?? panic("Router must have an owner to be identified"), + routerAddress: self.owner?.address ?? panic("EVM.setBridgeAccessor(): Router must be stored in an account's storage"), accessorType: accessor.borrow()!.getType(), accessorUUID: accessor.borrow()!.uuid, accessorAddress: accessor.address @@ -915,16 +992,17 @@ contract EVM { view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} { return self.account.storage.borrow(from: /storage/evmBridgeRouter) ?.borrowBridgeAccessor() - ?? panic("Could not borrow reference to the EVM bridge") + ?? panic("EVM.borrowBridgeAccessor(): Could not borrow a reference to the EVM bridge.") } /// The Heartbeat resource controls the block production. - /// It is stored in the storage and used in the Flow protocol to call the heartbeat function once per block. - access(all) - resource Heartbeat { - /// heartbeat calls commit block proposals and forms new blocks including all the - /// recently executed transactions. - /// The Flow protocol makes sure to call this function once per block as a system call. + /// It is stored in the storage and used in the Flow protocol + /// to call the heartbeat function once per block. + access(all) resource Heartbeat { + /// heartbeat calls commit block proposals and forms new blocks + /// including all the recently executed transactions. + /// The Flow protocol makes sure to call this function + /// once per block as a system call. access(all) fun heartbeat() { InternalEVM.commitBlockProposal() diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index ca79e8ccb85..180ed598319 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -8,6 +8,8 @@ import ( "strings" "testing" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/onflow/cadence" "github.com/onflow/cadence/common" "github.com/onflow/cadence/encoding/json" @@ -18,8 +20,6 @@ import ( . 
"github.com/onflow/cadence/test_utils/runtime_utils" coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" coreContractstemplates "github.com/onflow/flow-core-contracts/lib/go/templates" - gethTypes "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -3665,7 +3665,7 @@ func TestEVMDecodeABIWithSignatureMismatch(t *testing.T) { }, ) require.Error(t, err) - assert.ErrorContains(t, err, "panic: signature mismatch") + assert.ErrorContains(t, err, "EVM.decodeABIWithSignature(): Cannot decode! The signature does not match the provided data.") } func TestEVMAddressConstructionAndReturn(t *testing.T) { @@ -5998,7 +5998,7 @@ func TestEVMValidateCOAOwnershipProof(t *testing.T) { message := result.(cadence.Struct). SearchFieldByName("problem").(cadence.Optional). Value.(cadence.String).String() - require.Equal(t, "\"the given signatures are not valid or provide enough weight\"", message) + require.Equal(t, "\"EVM.validateCOAOwnershipProof(): Cannot validate COA ownership for Cadence account 0x0000000000000001. The given signatures are not valid or provide enough weight.\"", message) }) } diff --git a/fvm/evm/testutils/accounts.go b/fvm/evm/testutils/accounts.go index 41d6133f323..ea24d351c89 100644 --- a/fvm/evm/testutils/accounts.go +++ b/fvm/evm/testutils/accounts.go @@ -8,9 +8,9 @@ import ( "sync" "testing" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/onflow/atree" diff --git a/fvm/evm/testutils/contract.go b/fvm/evm/testutils/contract.go index 424877c9ce2..0ad57014290 100644 --- a/fvm/evm/testutils/contract.go +++ b/fvm/evm/testutils/contract.go @@ -6,8 +6,8 @@ import ( "strings" "testing" + gethABI "github.com/ethereum/go-ethereum/accounts/abi" "github.com/onflow/atree" - gethABI "github.com/onflow/go-ethereum/accounts/abi" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/emulator" diff --git a/fvm/evm/testutils/emulator.go b/fvm/evm/testutils/emulator.go index 244e240225b..37d3f9fcb5e 100644 --- a/fvm/evm/testutils/emulator.go +++ b/fvm/evm/testutils/emulator.go @@ -3,9 +3,9 @@ package testutils import ( "math/big" - gethCommon "github.com/onflow/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" + gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/onflow/flow-go/fvm/evm/types" ) diff --git a/fvm/evm/testutils/misc.go b/fvm/evm/testutils/misc.go index 82ae2de6ce3..32e625ee44f 100644 --- a/fvm/evm/testutils/misc.go +++ b/fvm/evm/testutils/misc.go @@ -6,9 +6,9 @@ import ( "math/rand" "testing" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/holiman/uint256" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/types" diff --git a/fvm/evm/types/address.go b/fvm/evm/types/address.go index cd0aca85c35..cc27dac5577 100644 --- a/fvm/evm/types/address.go +++ b/fvm/evm/types/address.go @@ -5,10 +5,10 @@ import ( "encoding/hex" "fmt" + 
gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" "github.com/onflow/cadence/sema" - gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/model/flow" ) diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go index f4123b71ddc..a9f48f581c9 100644 --- a/fvm/evm/types/block.go +++ b/fvm/evm/types/block.go @@ -5,11 +5,11 @@ import ( "math/big" "time" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" - gethRLP "github.com/onflow/go-ethereum/rlp" - gethTrie "github.com/onflow/go-ethereum/trie" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + gethRLP "github.com/ethereum/go-ethereum/rlp" + gethTrie "github.com/ethereum/go-ethereum/trie" "github.com/onflow/flow-go/model/flow" ) diff --git a/fvm/evm/types/block_test.go b/fvm/evm/types/block_test.go index a87f3eca008..03e6ca530f7 100644 --- a/fvm/evm/types/block_test.go +++ b/fvm/evm/types/block_test.go @@ -4,9 +4,9 @@ import ( "math/big" "testing" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethRLP "github.com/onflow/go-ethereum/rlp" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethRLP "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/fvm/evm/types/call.go b/fvm/evm/types/call.go index 598df6519c2..c8dd7efae97 100644 --- a/fvm/evm/types/call.go +++ b/fvm/evm/types/call.go @@ -4,11 +4,11 @@ import ( "fmt" "math/big" - gethCommon "github.com/onflow/go-ethereum/common" - gethCore "github.com/onflow/go-ethereum/core" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethParams "github.com/onflow/go-ethereum/params" - "github.com/onflow/go-ethereum/rlp" + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" ) const ( diff --git a/fvm/evm/types/call_test.go b/fvm/evm/types/call_test.go index 8c57600968f..0bc63b9d017 100644 --- a/fvm/evm/types/call_test.go +++ b/fvm/evm/types/call_test.go @@ -6,8 +6,8 @@ import ( "math/big" "testing" - gethTypes "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/rlp" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/fvm/evm/types/codeFinder.go b/fvm/evm/types/codeFinder.go index 1163fca547e..8f987f3ecb3 100644 --- a/fvm/evm/types/codeFinder.go +++ b/fvm/evm/types/codeFinder.go @@ -4,8 +4,8 @@ import ( "errors" "fmt" - gethCore "github.com/onflow/go-ethereum/core" - gethVM "github.com/onflow/go-ethereum/core/vm" + gethCore "github.com/ethereum/go-ethereum/core" + gethVM "github.com/ethereum/go-ethereum/core/vm" ) func ValidationErrorCode(err error) ErrorCode { diff --git a/fvm/evm/types/emulator.go b/fvm/evm/types/emulator.go index e0e883d89f4..9ec1636acf6 100644 --- a/fvm/evm/types/emulator.go +++ b/fvm/evm/types/emulator.go @@ -4,10 +4,10 @@ import ( "math" "math/big" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes 
"github.com/onflow/go-ethereum/core/types" - gethCrypto "github.com/onflow/go-ethereum/crypto" - "github.com/onflow/go-ethereum/eth/tracers" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers" ) var ( diff --git a/fvm/evm/types/handler.go b/fvm/evm/types/handler.go index 7f8c61368a5..4e4548e9c93 100644 --- a/fvm/evm/types/handler.go +++ b/fvm/evm/types/handler.go @@ -1,8 +1,8 @@ package types import ( + gethCommon "github.com/ethereum/go-ethereum/common" "github.com/onflow/cadence/common" - gethCommon "github.com/onflow/go-ethereum/common" ) // EVM is an account inside FVM with special access to the underlying infrastructure diff --git a/fvm/evm/types/precompiled.go b/fvm/evm/types/precompiled.go index 1215094a045..1344280b69a 100644 --- a/fvm/evm/types/precompiled.go +++ b/fvm/evm/types/precompiled.go @@ -4,8 +4,8 @@ import ( "bytes" "fmt" - gethVM "github.com/onflow/go-ethereum/core/vm" - "github.com/onflow/go-ethereum/rlp" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" ) // PrecompiledContract wraps gethVM precompiles with diff --git a/fvm/evm/types/proof.go b/fvm/evm/types/proof.go index 789d557897c..6d2ed3803ab 100644 --- a/fvm/evm/types/proof.go +++ b/fvm/evm/types/proof.go @@ -3,11 +3,11 @@ package types import ( "fmt" + "github.com/ethereum/go-ethereum/rlp" "github.com/onflow/cadence" "github.com/onflow/cadence/common" "github.com/onflow/cadence/sema" cadenceRLP "github.com/onflow/cadence/stdlib/rlp" - "github.com/onflow/go-ethereum/rlp" "github.com/onflow/flow-go/model/flow" ) diff --git a/fvm/evm/types/result.go b/fvm/evm/types/result.go index 42dc165e426..756d1742e8d 100644 --- a/fvm/evm/types/result.go +++ b/fvm/evm/types/result.go @@ -3,11 +3,11 @@ package types import ( "fmt" - "github.com/onflow/go-ethereum/accounts/abi" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethVM "github.com/onflow/go-ethereum/core/vm" - "github.com/onflow/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/accounts/abi" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" ) // InvalidTransactionGasCost is a gas cost we charge when diff --git a/fvm/evm/types/result_test.go b/fvm/evm/types/result_test.go index 36dfb3916ad..dcdfaf71000 100644 --- a/fvm/evm/types/result_test.go +++ b/fvm/evm/types/result_test.go @@ -3,8 +3,8 @@ package types_test import ( "testing" - gethTypes "github.com/onflow/go-ethereum/core/types" - gethTrie "github.com/onflow/go-ethereum/trie" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethTrie "github.com/ethereum/go-ethereum/trie" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/evm/testutils" diff --git a/fvm/evm/types/state.go b/fvm/evm/types/state.go index b9cd671cb9b..b7bdeb56b2a 100644 --- a/fvm/evm/types/state.go +++ b/fvm/evm/types/state.go @@ -1,12 +1,12 @@ package types import ( + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" "github.com/onflow/crypto/hash" - gethCommon "github.com/onflow/go-ethereum/common" - gethTypes 
"github.com/onflow/go-ethereum/core/types" - gethVM "github.com/onflow/go-ethereum/core/vm" - "github.com/onflow/go-ethereum/rlp" ) // StateDB acts as the main interface to the EVM runtime diff --git a/fvm/fvm_bench_test.go b/fvm/fvm_bench_test.go index 566cf81653a..cb7c4f27003 100644 --- a/fvm/fvm_bench_test.go +++ b/fvm/fvm_bench_test.go @@ -469,7 +469,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { access(all) fun empty() { } - access(all) fun emit() { + access(all) fun emitEvent() { emit SomeEvent() } } @@ -607,7 +607,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { b.Run("get account and get storage used", func(b *testing.B) { benchTransaction(b, func(b *testing.B, context benchTransactionContext) string { - return templateTx(100, `getAccount(signer.address).storageUsed`) + return templateTx(100, `getAccount(signer.address).storage.used`) }, ) }) @@ -691,7 +691,7 @@ func BenchmarkRuntimeTransaction(b *testing.B) { b.Run("emit event", func(b *testing.B) { benchTransaction(b, func(b *testing.B, context benchTransactionContext) string { - return templateTx(100, `TestContract.emit()`) + return templateTx(100, `TestContract.emitEvent()`) }, ) }) diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 79413303574..044f303274e 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -2847,6 +2847,54 @@ func TestTransientNetworkCoreContractAddresses(t *testing.T) { }) } +func TestFlowCallbackScheduler(t *testing.T) { + ctxOpts := []fvm.Option{ + fvm.WithScheduleCallbacksEnabled(true), + } + + newVMTest(). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + require.NotNil(t, sc.FlowCallbackScheduler.Address) + require.NotNil(t, sc.FlowCallbackScheduler.Name) + + script := fvm.Script([]byte(fmt.Sprintf(` + import FlowCallbackScheduler from %s + access(all) fun main(): FlowCallbackScheduler.Status? { + return FlowCallbackScheduler.getStatus(id: 1) + } + `, sc.FlowCallbackScheduler.Address.HexWithPrefix()))) + + _, output, err := vm.Run(ctx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotNil(t, output.Value) + require.Equal(t, output.Value, cadence.NewOptional(nil)) + + script = fvm.Script([]byte(fmt.Sprintf(` + import FlowCallbackScheduler from %s + access(all) fun main(): UInt64 { + return FlowCallbackScheduler.getSlotAvailableEffort(timestamp: 1.0, priority: FlowCallbackScheduler.Priority.High) + } + `, sc.FlowCallbackScheduler.Address.HexWithPrefix()))) + + const maxEffortAvailable = 30_000 // FLIP 330 + _, output, err = vm.Run(ctx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotNil(t, output.Value) + require.Equal(t, cadence.UInt64(maxEffortAvailable), output.Value) + }, + )(t) +} + func TestEVM(t *testing.T) { blocks := new(envMock.Blocks) block1 := unittest.BlockFixture() diff --git a/fvm/initialize/options.go b/fvm/initialize/options.go index fcfce074601..e484dcaccfd 100644 --- a/fvm/initialize/options.go +++ b/fvm/initialize/options.go @@ -10,7 +10,11 @@ import ( // InitFvmOptions initializes the FVM options based on the chain ID and headers. // This function is extracted so that it can be reused in multiple places, // and ensure that the FVM options are consistent across different components. 
-func InitFvmOptions(chainID flow.ChainID, headers storage.Headers) []fvm.Option { +func InitFvmOptions( + chainID flow.ChainID, + headers storage.Headers, + transactionFeesDisabled bool, +) []fvm.Option { blockFinder := environment.NewBlockFinder(headers) vmOpts := []fvm.Option{ fvm.WithChain(chainID.Chain()), @@ -22,8 +26,9 @@ func InitFvmOptions(chainID flow.ChainID, headers storage.Headers) []fvm.Option flow.Sandboxnet, flow.Previewnet, flow.Mainnet: + feesEnabled := !transactionFeesDisabled vmOpts = append(vmOpts, - fvm.WithTransactionFeesEnabled(true), + fvm.WithTransactionFeesEnabled(feesEnabled), ) } switch chainID { diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go index 636bf764c1b..043844ae369 100644 --- a/fvm/systemcontracts/system_contracts.go +++ b/fvm/systemcontracts/system_contracts.go @@ -32,6 +32,7 @@ const ( ContractNameServiceAccount = "FlowServiceAccount" ContractNameFlowFees = "FlowFees" ContractNameStorageFees = "FlowStorageFees" + ContractNameFlowCallbackScheduler = "FlowCallbackScheduler" ContractNameNodeVersionBeacon = "NodeVersionBeacon" ContractNameRandomBeaconHistory = "RandomBeaconHistory" ContractNameFungibleToken = "FungibleToken" @@ -161,6 +162,7 @@ type SystemContracts struct { // service account related contracts FlowServiceAccount SystemContract + FlowCallbackScheduler SystemContract NodeVersionBeacon SystemContract RandomBeaconHistory SystemContract FlowStorageFees SystemContract @@ -214,8 +216,8 @@ func (c SystemContracts) AsTemplateEnv() templates.Environment { NonFungibleTokenAddress: c.NonFungibleToken.Address.Hex(), MetadataViewsAddress: c.MetadataViews.Address.Hex(), - CrossVMMetadataViewsAddress: c.CrossVMMetadataViews.Address.Hex(), ViewResolverAddress: c.ViewResolver.Address.Hex(), + CrossVMMetadataViewsAddress: c.CrossVMMetadataViews.Address.Hex(), BurnerAddress: c.Burner.Address.Hex(), CryptoAddress: c.Crypto.Address.Hex(), @@ -231,6 +233,7 @@ func (c SystemContracts) All() []SystemContract { c.DKG, c.FlowServiceAccount, + c.FlowCallbackScheduler, c.NodeVersionBeacon, c.RandomBeaconHistory, c.FlowStorageFees, @@ -386,6 +389,7 @@ func init() { ContractNameRandomBeaconHistory: serviceAddressFunc, ContractNameServiceAccount: serviceAddressFunc, ContractNameStorageFees: serviceAddressFunc, + ContractNameFlowCallbackScheduler: serviceAddressFunc, AccountNameExecutionParametersAccount: executionParametersAccountFunc, ContractNameFlowFees: nthAddressFunc(FlowFeesAccountIndex), @@ -443,6 +447,7 @@ func init() { DKG: addressOfContract(ContractNameDKG), FlowServiceAccount: addressOfContract(ContractNameServiceAccount), + FlowCallbackScheduler: addressOfContract(ContractNameFlowCallbackScheduler), NodeVersionBeacon: addressOfContract(ContractNameNodeVersionBeacon), RandomBeaconHistory: addressOfContract(ContractNameRandomBeaconHistory), FlowStorageFees: addressOfContract(ContractNameStorageFees), diff --git a/fvm/transactionSequenceNum.go b/fvm/transactionSequenceNum.go index 81b77e4868f..51dc2014b0d 100644 --- a/fvm/transactionSequenceNum.go +++ b/fvm/transactionSequenceNum.go @@ -56,7 +56,7 @@ func (c TransactionSequenceNumberChecker) checkAndIncrementSequenceNumber( var accountKey flow.AccountPublicKey - accountKey, err = accounts.GetPublicKey(proposalKey.Address, proposalKey.KeyIndex) + accountKey, err = accounts.GetAccountPublicKey(proposalKey.Address, proposalKey.KeyIndex) if err != nil { return errors.NewInvalidProposalSignatureError(proposalKey, err) } @@ -76,7 +76,7 @@ func (c 
TransactionSequenceNumberChecker) checkAndIncrementSequenceNumber( accountKey.SeqNumber++ - _, err = accounts.SetPublicKey(proposalKey.Address, proposalKey.KeyIndex, accountKey) + _, err = accounts.SetAccountPublicKey(proposalKey.Address, proposalKey.KeyIndex, accountKey) if err != nil { restartError := txnState.RestartNestedTransaction(nestedTxnId) if restartError != nil { diff --git a/fvm/transactionSequenceNum_test.go b/fvm/transactionSequenceNum_test.go index c711e30d7cc..8603a25604d 100644 --- a/fvm/transactionSequenceNum_test.go +++ b/fvm/transactionSequenceNum_test.go @@ -38,7 +38,7 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.NoError(t, err) // get fetch the sequence number and it should be updated - key, err := accounts.GetPublicKey(address, 0) + key, err := accounts.GetAccountPublicKey(address, 0) require.NoError(t, err) require.Equal(t, key.SeqNumber, uint64(1)) }) @@ -67,7 +67,7 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.True(t, errors.HasErrorCode(err, errors.ErrCodeInvalidProposalSeqNumberError)) // get fetch the sequence number and check it to be unchanged - key, err := accounts.GetPublicKey(address, 0) + key, err := accounts.GetAccountPublicKey(address, 0) require.NoError(t, err) require.Equal(t, key.SeqNumber, uint64(0)) }) @@ -95,7 +95,7 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.Error(t, err) // get fetch the sequence number and check it to be unchanged - key, err := accounts.GetPublicKey(address, 0) + key, err := accounts.GetAccountPublicKey(address, 0) require.NoError(t, err) require.Equal(t, key.SeqNumber, uint64(0)) }) diff --git a/fvm/transactionVerifier.go b/fvm/transactionVerifier.go index 2b4ac0d640f..1968bea6cea 100644 --- a/fvm/transactionVerifier.go +++ b/fvm/transactionVerifier.go @@ -259,14 +259,14 @@ func (v *TransactionVerifier) verifyTransaction( // getAccountKeys gets the signatures' account keys and populate the account // keys into the signature continuation structs. 
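The `GetPublicKey`/`SetPublicKey` accessors are renamed to `GetAccountPublicKey`/`SetAccountPublicKey` here and in the sequence-number checker above; a minimal sketch of the renamed pair in use (the helper itself is hypothetical, the accessor names and return shapes follow this diff):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/fvm/environment"
	"github.com/onflow/flow-go/model/flow"
)

// bumpSeqNumber is a hypothetical helper illustrating the renamed accessors:
// read the proposal key, increment its sequence number, and write it back.
func bumpSeqNumber(accounts environment.Accounts, proposal flow.ProposalKey) error {
	key, err := accounts.GetAccountPublicKey(proposal.Address, proposal.KeyIndex)
	if err != nil {
		return fmt.Errorf("could not get proposal key: %w", err)
	}
	key.SeqNumber++
	if _, err := accounts.SetAccountPublicKey(proposal.Address, proposal.KeyIndex, key); err != nil {
		return fmt.Errorf("could not update proposal key: %w", err)
	}
	return nil
}
```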
func (v *TransactionVerifier) getAccountKeys( - txnState storage.TransactionPreparer, + _ storage.TransactionPreparer, accounts environment.Accounts, signatures []*signatureContinuation, proposalKey flow.ProposalKey, ) error { foundProposalSignature := false for _, signature := range signatures { - accountKey, err := accounts.GetPublicKey( + accountKey, err := accounts.GetAccountPublicKey( signature.Address, signature.KeyIndex) if err != nil { diff --git a/go.mod b/go.mod index 8178bb6e7c7..792f5c478e0 100644 --- a/go.mod +++ b/go.mod @@ -3,18 +3,19 @@ module github.com/onflow/flow-go go 1.23.7 require ( - cloud.google.com/go/compute/metadata v0.6.0 + cloud.google.com/go/compute/metadata v0.7.0 cloud.google.com/go/profiler v0.3.0 cloud.google.com/go/storage v1.50.0 github.com/antihax/optional v1.0.0 - github.com/aws/aws-sdk-go-v2/config v1.29.14 + github.com/aws/aws-sdk-go-v2/config v1.30.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 github.com/btcsuite/btcd/btcec/v2 v2.3.4 + github.com/cockroachdb/pebble/v2 v2.0.6 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dgraph-io/badger/v2 v2.2007.4 github.com/ef-ds/deque v1.0.4 - github.com/ethereum/go-ethereum v1.13.10 + github.com/ethereum/go-ethereum v1.16.2 github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 github.com/gammazero/workerpool v1.1.3 github.com/gogo/protobuf v1.3.2 @@ -28,13 +29,12 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/golang-lru v1.0.2 github.com/improbable-eng/grpc-web v0.15.0 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b + github.com/ipfs/go-datastore v0.8.2 + github.com/ipfs/go-ds-badger2 v0.1.4 + github.com/ipfs/go-ds-pebble v0.5.0 github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 @@ -48,13 +48,13 @@ require ( github.com/multiformats/go-multiaddr-dns v0.4.1 github.com/multiformats/go-multihash v0.2.3 github.com/onflow/atree v0.10.0 - github.com/onflow/cadence v1.6.0 + github.com/onflow/cadence v1.6.4-rc.1-public github.com/onflow/crypto v0.25.3 github.com/onflow/flow v0.3.4 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 - github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 - github.com/onflow/flow-go-sdk v1.6.0 - github.com/onflow/flow/protobuf/go/flow v0.4.10 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 + github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 + github.com/onflow/flow-go-sdk v1.7.0 + github.com/onflow/flow/protobuf/go/flow v0.4.11 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 @@ -66,26 +66,26 @@ require ( github.com/sethvargo/go-retry v0.2.3 github.com/shirou/gopsutil/v3 v3.22.2 github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.6 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.10.0 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/otel v1.35.0 + go.opentelemetry.io/otel v1.36.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/sdk v1.35.0 - 
go.opentelemetry.io/otel/trace v1.35.0 + go.opentelemetry.io/otel/sdk v1.36.0 + go.opentelemetry.io/otel/trace v1.36.0 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 - golang.org/x/crypto v0.37.0 + golang.org/x/crypto v0.39.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 - golang.org/x/sync v0.14.0 - golang.org/x/sys v0.32.0 - golang.org/x/text v0.24.0 - golang.org/x/time v0.11.0 - golang.org/x/tools v0.32.0 - google.golang.org/api v0.232.0 - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb - google.golang.org/grpc v1.72.0 + golang.org/x/sync v0.15.0 + golang.org/x/sys v0.33.0 + golang.org/x/text v0.26.0 + golang.org/x/time v0.12.0 + golang.org/x/tools v0.33.0 + google.golang.org/api v0.241.0 + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 + google.golang.org/grpc v1.74.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.36.6 gotest.tools v2.2.0+incompatible @@ -93,40 +93,47 @@ require ( ) require ( - github.com/cockroachdb/pebble v1.1.2 github.com/coreos/go-semver v0.3.0 github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/go-playground/validator/v10 v10.19.0 - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e github.com/gorilla/websocket v1.5.3 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/holiman/uint256 v1.3.2 github.com/huandu/go-clone/generic v1.7.2 github.com/ipfs/boxo v0.17.1-0.20240131173518-89bceff34bf1 + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 github.com/libp2p/go-libp2p-routing-helpers v0.7.4 github.com/mitchellh/mapstructure v1.5.0 github.com/onflow/flow-evm-bridge v0.1.0 - github.com/onflow/go-ethereum v1.15.10 + github.com/onflow/go-ethereum v1.13.4 github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 github.com/onflow/wal v1.0.2 github.com/slok/go-http-metrics v0.12.0 github.com/sony/gobreaker v0.5.0 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e - google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34 + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a + google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 gopkg.in/yaml.v2 v2.4.0 ) require ( - cel.dev/expr v0.20.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect +) + +require ( + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.120.0 // indirect - cloud.google.com/go/auth v0.16.1 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/iam v1.5.0 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect - github.com/DataDog/zstd v1.5.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping 
v0.50.0 // indirect github.com/Jorropo/jsync v1.0.1 // indirect @@ -135,37 +142,37 @@ require ( github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.17.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.22 // indirect - github.com/consensys/gnark-crypto v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect - github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect @@ -176,7 +183,6 @@ require ( github.com/elastic/gosigar v0.14.3 // indirect 
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/c-kzg-4844 v1.0.0 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -188,24 +194,25 @@ require ( github.com/gabriel-vasile/mimetype v1.4.6 // indirect github.com/gammazero/deque v1.0.0 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/flock v0.8.1 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huandu/go-clone v1.6.0 // indirect @@ -252,7 +259,6 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -327,32 +333,29 @@ require ( github.com/zeebo/errs v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 
v0.30.0 // indirect - golang.org/x/term v0.31.0 // indirect + golang.org/x/term v0.32.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.0 // indirect nhooyr.io/websocket v1.8.7 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) // Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 - -replace github.com/hashicorp/golang-lru/v2 => github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f diff --git a/go.sum b/go.sum index 10bee0ae1f8..f7662ab93f1 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -35,8 +35,8 @@ cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2Z cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= -cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= -cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -50,19 +50,19 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.5.0 
h1:QlLcVMhbLGOjRcGe6VTGGTyQib8dRLK2B/kYNV0+2xs= -cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.6 h1:XJNDo5MUfMM05xK3ewpbSdmt7R2Zw+aQEMbdQR65Rbw= -cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -78,8 +78,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -88,11 +88,10 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e 
h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= @@ -116,6 +115,8 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -138,47 +139,47 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0= +github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY= -github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= -github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/config v1.30.0 h1:XhzXYU2x/T441/0CBh0g6UUC/OFGk+FRpl3ThI8AqM8= +github.com/aws/aws-sdk-go-v2/config v1.30.0/go.mod h1:4j78A2ko2xc7SMLjjSUrgpp42vyneH9c8j3emf/CLTo= github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0 h1:r9W/BX4B1dEbsd2NogyuFXmEfYhdUULUVEOh0SDAovw= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0/go.mod 
h1:SMtUJQRWEpyfC+ouDJNYdI7NNMqUjHM/Oaf0FV+vWNs= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 h1:ouCRc4lCriJtCnrIN4Kw2tA/uETRZBrxwb/607gRvkE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0/go.mod h1:LW9/PxQD1SYFC7pnWcgqPhoyZprhjEdg5hBK6qYPLW8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU= 
github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4= github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 h1:cuFWHH87GP1NBGXXfMicUbE7Oty5KpPxN6w4JpmuxYc= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0/go.mod h1:aJBemdlbCKyOXEXdXBqS7E+8S9XTDcOTaoOjtng54hA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 h1:t2va+wewPOYIqC6XyJ4MGjiGKkczMAPsgq5W4FtL9ME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0/go.mod h1:ExCTcqYqN0hYYRsDlBVU8+68grqlWdgX9/nZJwQW4aY= github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 h1:FD9agdG4CeOGS3ORLByJk56YIXDS7mxFpmZyCtpqExc= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0/go.mod h1:NDzDPbBF1xtSTZUMuZx0w3hIfWzcL7X2AQ0Tr9becIQ= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -188,8 +189,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -229,28 +230,34 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= -github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE= +github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod 
h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A= -github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= -github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -273,10 +280,10 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= +github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= -github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= -github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -295,7 +302,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= @@ -321,6 +327,8 @@ github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 
h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -340,10 +348,10 @@ github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJP github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.10 h1:Ppdil79nN+Vc+mXfge0AuUgmKWuVv4eMqzoIVSdqZek= -github.com/ethereum/go-ethereum v1.13.10/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= +github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.2 h1:VDHqj86DaQiMpnMgc7l0rwZTg0FRmlz74yupSG5SnzI= +github.com/ethereum/go-ethereum v1.16.2/go.mod h1:X5CIOyo8SuK1Q5GnaEizQVLHT/DfsiGWuNeVdQcEMNA= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -351,6 +359,8 @@ github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -370,8 +380,6 @@ github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 h1:qOglMkJ5YBw github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA= github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= -github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f h1:/gqGg2NQVvwiLXs7ppw2uneC5AAd2Z9OTp0zgu42zNI= -github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f/go.mod h1:qnbgnNzfydwuHjSCApF4bdul+tZ8T3y1MkZG/OFczLA= github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= github.com/gabriel-vasile/mimetype v1.4.6/go.mod 
h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= @@ -380,6 +388,8 @@ github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44 github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -394,8 +404,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -409,8 +419,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -448,8 +458,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock 
v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -458,8 +468,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -498,11 +508,10 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -557,9 +566,7 @@ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAx github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -575,8 +582,8 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -628,6 +635,8 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -666,18 +675,16 @@ github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= +github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b h1:lby3w+96HfyjiFP4ODbcfr4j7pNza7g3XQywnNu+9Mc= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b/go.mod h1:q8icEwk8lSpbPAsa7l9SPm6yt9Z+c4QePFyfKHYmdJw= 
+github.com/ipfs/go-ds-badger2 v0.1.4 h1:4EDTEDV/Ft/zr5AaJXp2IojvApwevlUD9uahMDONWTE= +github.com/ipfs/go-ds-badger2 v0.1.4/go.mod h1:6WOt9PzJ98Tu7gizJ35NuXDORsYxQ3c4/3gjqF+kq0c= +github.com/ipfs/go-ds-pebble v0.5.0 h1:lXffYCAKVD7nLLPqwJ9D8IxgO7Kz8woiX021tezdsIM= +github.com/ipfs/go-ds-pebble v0.5.0/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= @@ -690,7 +697,6 @@ github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9 github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= @@ -711,6 +717,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -750,7 +758,6 @@ github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoK github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -863,9 +870,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure 
v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -939,32 +943,32 @@ github.com/onflow/atree v0.10.0 h1:LFYlRgb0fjs8vezBW/N/tzi+ijLMssjHwIwoV4RwYaA= github.com/onflow/atree v0.10.0/go.mod h1:aqnnE8Os77JiBIeC7UcbeM7N1V3Ys5XWH0CykeMpym0= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= -github.com/onflow/cadence v1.6.0 h1:nFHaEFvekL+9cXuO7w33w6Y7nC1X7PZZHQdSYfE8CvQ= -github.com/onflow/cadence v1.6.0/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= +github.com/onflow/cadence v1.6.4-rc.1-public h1:gIdOGie3gO/4I/I7R6Kou1LeWT9bW1k6XLMpmR/CN6c= +github.com/onflow/cadence v1.6.4-rc.1-public/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U= github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs= github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE= github.com/onflow/flow v0.3.4/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 h1:n2NTsVT4iH7snqW6nkP1PnxHmgEMCh3XZpbdSIqweO4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1/go.mod h1:3tMXL4npVbk/F1X6SqxZWelQn0pyGvMBVbUXsgvC6Is= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 h1:Y0bDvS5fTOCrKr7QFl0by3qTq7MFnauVnHoxwW6nQzo= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1/go.mod h1:pN768Al/wLRlf3bwugv9TyxniqJxMu4sxnX9eQJam64= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 h1:RtNoSnri8LwUzlUeffpeBXjazSf7kqhFbHDOAu32bXc= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022/go.mod h1:/Yne6g7V2Fy1sm/vE78us221bYvVvL5cA8cOzN/uTCI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 h1:upAfrSTYm/a/Q7UMOE2sqJxsvgOGUkib7In59ogrwDI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022/go.mod h1:yBkysayvSKZ/yFO3fEX4YQ/FEZtV6Tnov8ix0lBeiqM= github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM= github.com/onflow/flow-evm-bridge v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk= github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= -github.com/onflow/flow-go-sdk v1.6.0 h1:rN5T5Icva4GjV+RPiUlFm2FMmm0IkQ9f/B8SDlZHRu8= -github.com/onflow/flow-go-sdk 
v1.6.0/go.mod h1:EBcCMA9Bbjgp/A21i4qCthv9enV4CUYEVZoF8a68vMQ= +github.com/onflow/flow-go-sdk v1.7.0 h1:kSw94LZ+0ppt5ELqzixk7jjzkcrOR0Lh4mOgyu+KTOI= +github.com/onflow/flow-go-sdk v1.7.0/go.mod h1:a5JyRWg1mT6MoixnjTl/E/6AO95u/r2BBy7U/CycvUM= github.com/onflow/flow-nft/lib/go/contracts v1.2.4 h1:gWJgSSgIGo0qWOqr90+khQ69VoYF9vNlqzF+Yh6YYy4= github.com/onflow/flow-nft/lib/go/contracts v1.2.4/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.2.1 h1:SAALMZPDw9Eb9p5kSLnmnFxjyig1MLiT4JUlLp0/bSE= github.com/onflow/flow-nft/lib/go/templates v1.2.1/go.mod h1:W6hOWU0xltPqNpv9gQX8Pj8Jtf0OmRxc1XX2V0kzJaI= -github.com/onflow/flow/protobuf/go/flow v0.4.10 h1:CGEO3n96XZQd/k5HtkZyb90ouem9G+8fNcKyt8s2fvs= -github.com/onflow/flow/protobuf/go/flow v0.4.10/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= -github.com/onflow/go-ethereum v1.15.10 h1:blZBeOLJDOVWqKuhkkMh6S2PKQAJvdgbvOL9ZNggFcU= -github.com/onflow/go-ethereum v1.15.10/go.mod h1:t2nZJtwruVjA5u5yEK8InFzjImFLHrF7ak2bw3E4LDM= +github.com/onflow/flow/protobuf/go/flow v0.4.11 h1:EYjaATmor1ONczbvJ6VejQAJK4elFRk5h9AapxQ7j4c= +github.com/onflow/flow/protobuf/go/flow v0.4.11/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= +github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= @@ -1115,6 +1119,8 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -1217,8 +1223,9 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= @@ -1333,28 +1340,28 @@ go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= 
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= @@ -1411,8 +1418,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1454,8 +1461,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1521,8 +1528,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 
h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1560,8 +1567,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1669,8 +1676,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1679,8 +1686,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1695,14 +1702,14 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1767,8 +1774,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= -golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1819,8 +1826,8 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.232.0 h1:qGnmaIMf7KcuwHOlF3mERVzChloDYwRfOJOrHt8YC3I= -google.golang.org/api v0.232.0/go.mod h1:p9QCfBWZk1IJETUdbTKloR5ToFdKbYh2fkjsUL6vNoY= +google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE= +google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1916,14 +1923,14 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34 h1:nfEb4Q4usCEhvyA4vmf47NmO3alop2ab5p5gupICWU4= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 h1:zWFRixYR5QlotL+Uv3YfsPRENIrQFXiGs+iwqel6fOQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1961,8 +1968,8 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/grpc/cmd/protoc-gen-go-grpc 
v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= @@ -2036,8 +2043,6 @@ pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= diff --git a/insecure/go.mod b/insecure/go.mod index 639f67a749e..bc7a3b64b03 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -6,31 +6,31 @@ require ( github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 github.com/golang/protobuf v1.5.4 github.com/hashicorp/go-multierror v1.1.1 - github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-datastore v0.8.2 github.com/libp2p/go-libp2p v0.38.2 github.com/libp2p/go-libp2p-pubsub v0.13.0 github.com/multiformats/go-multiaddr-dns v0.4.1 github.com/onflow/crypto v0.25.3 github.com/onflow/flow-go v0.36.2-0.20240717162253-d5d2e606ef53 github.com/rs/zerolog v1.29.0 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 - google.golang.org/grpc v1.72.0 + google.golang.org/grpc v1.74.2 google.golang.org/protobuf v1.36.6 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.120.0 // indirect - cloud.google.com/go/auth v0.16.1 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect - cloud.google.com/go/iam v1.5.0 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.50.0 // indirect - github.com/DataDog/zstd v1.5.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect github.com/Jorropo/jsync v1.0.1 // indirect @@ -38,43 +38,44 @@ require ( github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + 
github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.30.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.17.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/pebble/v2 v2.0.6 // indirect github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.22 // indirect - github.com/consensys/gnark-crypto v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect - github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect @@ -88,12 +89,14 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // 
indirect github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/c-kzg-4844 v1.0.0 // indirect - github.com/ethereum/go-ethereum v1.13.10 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + github.com/ethereum/go-ethereum v1.16.2 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -104,11 +107,11 @@ require ( github.com/gammazero/deque v1.0.0 // indirect github.com/gammazero/workerpool v1.1.3 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -116,17 +119,17 @@ require ( github.com/go-playground/validator/v10 v10.19.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect @@ -148,8 +151,8 @@ require ( github.com/ipfs/go-block-format v0.2.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-ds-badger2 v0.1.3 // indirect - github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b // indirect + github.com/ipfs/go-ds-badger2 v0.1.4 // indirect + github.com/ipfs/go-ds-pebble v0.5.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect @@ -162,6 +165,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 // indirect github.com/k0kubun/pp v3.0.1+incompatible // indirect github.com/kevinburke/go-bindata v3.24.0+incompatible // indirect github.com/klauspost/compress v1.17.11 // indirect @@ -197,7 +201,6 @@ require ( 
github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -211,17 +214,17 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onflow/atree v0.10.0 // indirect - github.com/onflow/cadence v1.6.0 // indirect - github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 // indirect - github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 // indirect + github.com/onflow/cadence v1.6.4-rc.1-public // indirect + github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 // indirect + github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 // indirect github.com/onflow/flow-evm-bridge v0.1.0 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect - github.com/onflow/flow-go-sdk v1.6.0 // indirect + github.com/onflow/flow-go-sdk v1.7.0 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.2.4 // indirect github.com/onflow/flow-nft/lib/go/templates v1.2.1 // indirect - github.com/onflow/flow/protobuf/go/flow v0.4.10 // indirect - github.com/onflow/go-ethereum v1.15.10 // indirect + github.com/onflow/flow/protobuf/go/flow v0.4.11 // indirect + github.com/onflow/go-ethereum v1.13.4 // indirect github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 // indirect github.com/onflow/sdks v0.6.0-preview.1 // indirect github.com/onflow/wal v1.0.2 // indirect @@ -297,46 +300,46 @@ require ( github.com/zeebo/errs v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.37.0 // indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp 
v0.0.0-20241217172543-b2144cdd0a67 // indirect - golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.14.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.32.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.33.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.232.0 // indirect + google.golang.org/api v0.241.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.0 // indirect nhooyr.io/websocket v1.8.7 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) replace github.com/onflow/flow-go => ../ diff --git a/insecure/go.sum b/insecure/go.sum index 538de7bda62..3af9edfab15 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -23,8 +23,8 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= -cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= -cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -33,18 +33,18 @@ cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.5.0 h1:QlLcVMhbLGOjRcGe6VTGGTyQib8dRLK2B/kYNV0+2xs= -cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.6 h1:XJNDo5MUfMM05xK3ewpbSdmt7R2Zw+aQEMbdQR65Rbw= -cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -59,8 +59,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -69,11 +69,10 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod 
h1:0PRwlb0D git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= @@ -97,6 +96,8 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -117,47 +118,47 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0= +github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod 
h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY= -github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= -github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/config v1.30.0 h1:XhzXYU2x/T441/0CBh0g6UUC/OFGk+FRpl3ThI8AqM8= +github.com/aws/aws-sdk-go-v2/config v1.30.0/go.mod h1:4j78A2ko2xc7SMLjjSUrgpp42vyneH9c8j3emf/CLTo= github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0 h1:r9W/BX4B1dEbsd2NogyuFXmEfYhdUULUVEOh0SDAovw= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0/go.mod h1:SMtUJQRWEpyfC+ouDJNYdI7NNMqUjHM/Oaf0FV+vWNs= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 h1:ouCRc4lCriJtCnrIN4Kw2tA/uETRZBrxwb/607gRvkE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0/go.mod h1:LW9/PxQD1SYFC7pnWcgqPhoyZprhjEdg5hBK6qYPLW8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4= github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 h1:cuFWHH87GP1NBGXXfMicUbE7Oty5KpPxN6w4JpmuxYc= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0/go.mod h1:aJBemdlbCKyOXEXdXBqS7E+8S9XTDcOTaoOjtng54hA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 h1:t2va+wewPOYIqC6XyJ4MGjiGKkczMAPsgq5W4FtL9ME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0/go.mod h1:ExCTcqYqN0hYYRsDlBVU8+68grqlWdgX9/nZJwQW4aY= github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 h1:FD9agdG4CeOGS3ORLByJk56YIXDS7mxFpmZyCtpqExc= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0/go.mod h1:NDzDPbBF1xtSTZUMuZx0w3hIfWzcL7X2AQ0Tr9becIQ= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -167,8 +168,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
-github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -204,28 +205,34 @@ github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= -github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 
h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE= +github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A= -github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= -github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -248,10 +255,10 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= +github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= -github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= -github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -270,7 +277,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= 
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= @@ -296,6 +302,8 @@ github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -311,15 +319,17 @@ github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJP github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.10 h1:Ppdil79nN+Vc+mXfge0AuUgmKWuVv4eMqzoIVSdqZek= -github.com/ethereum/go-ethereum v1.13.10/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= +github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.2 h1:VDHqj86DaQiMpnMgc7l0rwZTg0FRmlz74yupSG5SnzI= +github.com/ethereum/go-ethereum v1.16.2/go.mod h1:X5CIOyo8SuK1Q5GnaEizQVLHT/DfsiGWuNeVdQcEMNA= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -349,6 +359,8 @@ github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44 
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -363,8 +375,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -378,8 +390,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -417,8 +429,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -427,8 +439,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -465,11 +477,10 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -515,9 +526,7 @@ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAx github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -528,8 +537,8 @@ 
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= @@ -616,18 +625,16 @@ github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= +github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b h1:lby3w+96HfyjiFP4ODbcfr4j7pNza7g3XQywnNu+9Mc= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b/go.mod h1:q8icEwk8lSpbPAsa7l9SPm6yt9Z+c4QePFyfKHYmdJw= +github.com/ipfs/go-ds-badger2 v0.1.4 h1:4EDTEDV/Ft/zr5AaJXp2IojvApwevlUD9uahMDONWTE= +github.com/ipfs/go-ds-badger2 v0.1.4/go.mod h1:6WOt9PzJ98Tu7gizJ35NuXDORsYxQ3c4/3gjqF+kq0c= +github.com/ipfs/go-ds-pebble v0.5.0 h1:lXffYCAKVD7nLLPqwJ9D8IxgO7Kz8woiX021tezdsIM= +github.com/ipfs/go-ds-pebble v0.5.0/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= @@ -640,7 +647,6 @@ github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9 github.com/ipfs/go-log v1.0.5 
h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= @@ -663,6 +669,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -702,7 +710,6 @@ github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoK github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -815,9 +822,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -889,30 +893,30 @@ github.com/onflow/atree v0.10.0 h1:LFYlRgb0fjs8vezBW/N/tzi+ijLMssjHwIwoV4RwYaA= github.com/onflow/atree v0.10.0/go.mod h1:aqnnE8Os77JiBIeC7UcbeM7N1V3Ys5XWH0CykeMpym0= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= github.com/onflow/boxo 
v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= -github.com/onflow/cadence v1.6.0 h1:nFHaEFvekL+9cXuO7w33w6Y7nC1X7PZZHQdSYfE8CvQ= -github.com/onflow/cadence v1.6.0/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= +github.com/onflow/cadence v1.6.4-rc.1-public h1:gIdOGie3gO/4I/I7R6Kou1LeWT9bW1k6XLMpmR/CN6c= +github.com/onflow/cadence v1.6.4-rc.1-public/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U= github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 h1:n2NTsVT4iH7snqW6nkP1PnxHmgEMCh3XZpbdSIqweO4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1/go.mod h1:3tMXL4npVbk/F1X6SqxZWelQn0pyGvMBVbUXsgvC6Is= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 h1:Y0bDvS5fTOCrKr7QFl0by3qTq7MFnauVnHoxwW6nQzo= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1/go.mod h1:pN768Al/wLRlf3bwugv9TyxniqJxMu4sxnX9eQJam64= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 h1:RtNoSnri8LwUzlUeffpeBXjazSf7kqhFbHDOAu32bXc= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022/go.mod h1:/Yne6g7V2Fy1sm/vE78us221bYvVvL5cA8cOzN/uTCI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 h1:upAfrSTYm/a/Q7UMOE2sqJxsvgOGUkib7In59ogrwDI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022/go.mod h1:yBkysayvSKZ/yFO3fEX4YQ/FEZtV6Tnov8ix0lBeiqM= github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM= github.com/onflow/flow-evm-bridge v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk= github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= -github.com/onflow/flow-go-sdk v1.6.0 h1:rN5T5Icva4GjV+RPiUlFm2FMmm0IkQ9f/B8SDlZHRu8= -github.com/onflow/flow-go-sdk v1.6.0/go.mod h1:EBcCMA9Bbjgp/A21i4qCthv9enV4CUYEVZoF8a68vMQ= +github.com/onflow/flow-go-sdk v1.7.0 h1:kSw94LZ+0ppt5ELqzixk7jjzkcrOR0Lh4mOgyu+KTOI= +github.com/onflow/flow-go-sdk v1.7.0/go.mod h1:a5JyRWg1mT6MoixnjTl/E/6AO95u/r2BBy7U/CycvUM= github.com/onflow/flow-nft/lib/go/contracts v1.2.4 h1:gWJgSSgIGo0qWOqr90+khQ69VoYF9vNlqzF+Yh6YYy4= github.com/onflow/flow-nft/lib/go/contracts v1.2.4/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.2.1 h1:SAALMZPDw9Eb9p5kSLnmnFxjyig1MLiT4JUlLp0/bSE= github.com/onflow/flow-nft/lib/go/templates v1.2.1/go.mod h1:W6hOWU0xltPqNpv9gQX8Pj8Jtf0OmRxc1XX2V0kzJaI= -github.com/onflow/flow/protobuf/go/flow v0.4.10 h1:CGEO3n96XZQd/k5HtkZyb90ouem9G+8fNcKyt8s2fvs= -github.com/onflow/flow/protobuf/go/flow v0.4.10/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= -github.com/onflow/go-ethereum v1.15.10 h1:blZBeOLJDOVWqKuhkkMh6S2PKQAJvdgbvOL9ZNggFcU= -github.com/onflow/go-ethereum v1.15.10/go.mod h1:t2nZJtwruVjA5u5yEK8InFzjImFLHrF7ak2bw3E4LDM= +github.com/onflow/flow/protobuf/go/flow v0.4.11 h1:EYjaATmor1ONczbvJ6VejQAJK4elFRk5h9AapxQ7j4c= +github.com/onflow/flow/protobuf/go/flow 
v0.4.11/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= +github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= @@ -1061,6 +1065,8 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -1162,8 +1168,9 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= @@ -1276,28 +1283,28 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod 
h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1353,8 +1360,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.8.0/go.mod 
h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1395,8 +1402,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1455,8 +1462,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1484,8 +1491,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1559,7 +1566,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1574,8 +1580,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1584,8 +1590,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1599,14 +1605,14 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod 
h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1668,8 +1674,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= -golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1701,8 +1707,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.232.0 h1:qGnmaIMf7KcuwHOlF3mERVzChloDYwRfOJOrHt8YC3I= -google.golang.org/api v0.232.0/go.mod h1:p9QCfBWZk1IJETUdbTKloR5ToFdKbYh2fkjsUL6vNoY= +google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE= +google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1757,14 +1763,14 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34 h1:nfEb4Q4usCEhvyA4vmf47NmO3alop2ab5p5gupICWU4= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 h1:zWFRixYR5QlotL+Uv3YfsPRENIrQFXiGs+iwqel6fOQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1789,8 +1795,8 @@ google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1861,8 +1867,6 @@ pgregory.net/rapid v1.1.0/go.mod 
h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= diff --git a/integration/benchmark/load/evm_batch_load.go b/integration/benchmark/load/evm_batch_load.go index 1cbaa7800ae..146136fa789 100644 --- a/integration/benchmark/load/evm_batch_load.go +++ b/integration/benchmark/load/evm_batch_load.go @@ -9,11 +9,11 @@ import ( "math/big" "time" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/onflow/cadence" - gethcommon "github.com/onflow/go-ethereum/common" - "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/crypto" - "github.com/onflow/go-ethereum/params" "github.com/rs/zerolog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" diff --git a/integration/benchmark/load/evm_load.go b/integration/benchmark/load/evm_load.go index 241c315bea6..ed592a5773a 100644 --- a/integration/benchmark/load/evm_load.go +++ b/integration/benchmark/load/evm_load.go @@ -9,11 +9,11 @@ import ( "math/big" "time" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/onflow/cadence" - gethcommon "github.com/onflow/go-ethereum/common" - "github.com/onflow/go-ethereum/core/types" - "github.com/onflow/go-ethereum/crypto" - "github.com/onflow/go-ethereum/params" "github.com/rs/zerolog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" diff --git a/integration/benchnet2/automate/templates/helm-values-all-nodes.yml b/integration/benchnet2/automate/templates/helm-values-all-nodes.yml index 8ac3d7f8cb2..5da07ee549a 100644 --- a/integration/benchnet2/automate/templates/helm-values-all-nodes.yml +++ b/integration/benchnet2/automate/templates/helm-values-all-nodes.yml @@ -16,6 +16,7 @@ access: {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --rpc-addr=0.0.0.0:9000 - --secure-rpc-addr=0.0.0.0:9001 - --http-addr=0.0.0.0:8000 @@ -44,6 +45,7 @@ collection: {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --block-rate-delay=950ms - --ingress-addr=0.0.0.0:9000 - --insecure-access-api=false @@ -67,8 +69,10 @@ consensus: {{$val.name}}: args:{{template "args" .}} - --loglevel=DEBUG - - --block-rate-delay=800ms - - --cruise-ctl-max-view-duration=2s + - --admin-addr=0.0.0.0:9002 + # Benchnet networks use default 1bps timing + - --cruise-ctl-max-view-duration=1500ms + - --hotstuff-min-timeout=2s - --chunk-alpha=1 - --emergency-sealing-active=false - --insecure-access-api=false @@ -92,6 +96,7 @@ execution: {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --rpc-addr=0.0.0.0:9000 - --cadence-tracing=false - 
--extensive-tracing=false @@ -115,6 +120,7 @@ verification: {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --chunk-alpha=1 env:{{template "env" .}} image: {{$val.docker_registry}}/verification:{{$val.docker_tag}} diff --git a/integration/benchnet2/flow/templates/access.yml b/integration/benchnet2/flow/templates/access.yml index d9a68f235b4..91a28035201 100644 --- a/integration/benchnet2/flow/templates/access.yml +++ b/integration/benchnet2/flow/templates/access.yml @@ -33,6 +33,9 @@ spec: role: access service: flow network: {{ $.Values.networkId }} + {{- if contains "access1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} spec: nodeSelector: iam.gke.io/gke-metadata-server-enabled: "true" diff --git a/integration/benchnet2/flow/templates/collection.yml b/integration/benchnet2/flow/templates/collection.yml index e73d00f7b18..b4f59a203e5 100644 --- a/integration/benchnet2/flow/templates/collection.yml +++ b/integration/benchnet2/flow/templates/collection.yml @@ -32,6 +32,9 @@ spec: role: collection service: flow network: {{ $.Values.networkId }} + {{- if contains "collection1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} spec: nodeSelector: iam.gke.io/gke-metadata-server-enabled: "true" diff --git a/integration/benchnet2/flow/templates/consensus.yml b/integration/benchnet2/flow/templates/consensus.yml index ddfbf84380a..04e2126156b 100644 --- a/integration/benchnet2/flow/templates/consensus.yml +++ b/integration/benchnet2/flow/templates/consensus.yml @@ -32,6 +32,9 @@ spec: role: consensus service: flow network: {{ $.Values.networkId }} + {{- if contains "consensus1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} spec: nodeSelector: iam.gke.io/gke-metadata-server-enabled: "true" diff --git a/integration/benchnet2/flow/templates/execution.yml b/integration/benchnet2/flow/templates/execution.yml index 67e6daad85d..a6152d40035 100644 --- a/integration/benchnet2/flow/templates/execution.yml +++ b/integration/benchnet2/flow/templates/execution.yml @@ -32,6 +32,9 @@ spec: role: execution service: flow network: {{ $.Values.networkId }} + {{- if contains "execution1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} spec: nodeSelector: iam.gke.io/gke-metadata-server-enabled: "true" diff --git a/integration/benchnet2/flow/templates/verification.yml b/integration/benchnet2/flow/templates/verification.yml index 02d7870e131..51d2a4bab11 100644 --- a/integration/benchnet2/flow/templates/verification.yml +++ b/integration/benchnet2/flow/templates/verification.yml @@ -33,6 +33,9 @@ spec: role: verification owner: {{ $.Values.owner }} service: flow + {{- if contains "verification1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} spec: nodeSelector: iam.gke.io/gke-metadata-server-enabled: "true" diff --git a/integration/dkg/dkg_whiteboard_test.go b/integration/dkg/dkg_whiteboard_test.go index d0453d7d064..5050d2338e5 100644 --- a/integration/dkg/dkg_whiteboard_test.go +++ b/integration/dkg/dkg_whiteboard_test.go @@ -251,7 +251,8 @@ func TestWithWhiteboard(t *testing.T) { bootstrapNodesInfo, currentEpochSetup, nextEpochSetup, - firstBlock) + firstBlock, + ) for _, node := range nodes { node.Start() diff --git a/integration/epochs/epoch_qc_test.go b/integration/epochs/epoch_qc_test.go index 492eab64e34..0a4e428a78d 100644 --- a/integration/epochs/epoch_qc_test.go +++ b/integration/epochs/epoch_qc_test.go @@ -9,6 +9,7 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" 
"github.com/stretchr/testify/suite" sdk "github.com/onflow/flow-go-sdk" @@ -93,7 +94,14 @@ func (s *Suite) TestEpochQuorumCertificate() { signature, err := stakingPrivKey.Sign(voteMessage, hasher) s.Require().NoError(err) - vote := hotstuffmodel.VoteFromFlow(nodeID, blockID, view, signature) + vote, err := hotstuffmodel.NewVote(hotstuffmodel.UntrustedVote{ + View: view, + BlockID: blockID, + SignerID: nodeID, + SigData: signature, + }) + require.NoError(s.T(), err) + hotSigner := &hotstuff.Signer{} hotSigner.On("CreateVote", mock.Anything).Return(vote, nil) diff --git a/integration/go.mod b/integration/go.mod index ca7037fd0e1..8ff35fce418 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -3,7 +3,7 @@ module github.com/onflow/flow-go/integration go 1.23.7 require ( - cloud.google.com/go/bigquery v1.66.2 + cloud.google.com/go/bigquery v1.67.0 github.com/VividCortex/ewma v1.2.0 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 github.com/coreos/go-semver v0.3.0 @@ -11,24 +11,24 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 github.com/docker/docker v24.0.6+incompatible github.com/docker/go-connections v0.4.0 + github.com/ethereum/go-ethereum v1.16.2 github.com/go-git/go-git/v5 v5.11.0 github.com/go-yaml/yaml v2.1.0+incompatible github.com/gorilla/websocket v1.5.3 github.com/ipfs/boxo v0.27.2 github.com/ipfs/go-cid v0.4.1 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b + github.com/ipfs/go-datastore v0.8.2 + github.com/ipfs/go-ds-badger2 v0.1.4 + github.com/ipfs/go-ds-pebble v0.5.0 github.com/libp2p/go-libp2p v0.38.2 - github.com/onflow/cadence v1.6.0 + github.com/onflow/cadence v1.6.4-rc.1-public github.com/onflow/crypto v0.25.3 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 - github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 + github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 github.com/onflow/flow-go v0.38.0-preview.0.0.20241021221952-af9cd6e99de1 - github.com/onflow/flow-go-sdk v1.6.0 + github.com/onflow/flow-go-sdk v1.7.0 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 - github.com/onflow/flow/protobuf/go/flow v0.4.10 - github.com/onflow/go-ethereum v1.15.10 + github.com/onflow/flow/protobuf/go/flow v0.4.11 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.61.0 @@ -39,25 +39,25 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/mock v0.5.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 - golang.org/x/sync v0.14.0 - google.golang.org/grpc v1.72.0 + golang.org/x/sync v0.15.0 + google.golang.org/grpc v1.74.2 google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v3 v3.0.1 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.120.0 // indirect - cloud.google.com/go/auth v0.16.1 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect - cloud.google.com/go/iam v1.5.0 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.50.0 // indirect dario.cat/mergo v1.0.0 // indirect 
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/DataDog/zstd v1.5.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect github.com/Jorropo/jsync v1.0.1 // indirect @@ -67,44 +67,45 @@ require ( github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.30.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.17.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - 
github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/pebble/v2 v2.0.6 // indirect github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.22 // indirect - github.com/consensys/gnark-crypto v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect - github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -122,13 +123,14 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // indirect github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/c-kzg-4844 v1.0.0 // indirect - github.com/ethereum/go-ethereum v1.13.10 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -141,11 +143,11 @@ require ( github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -154,20 +156,20 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - 
github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect @@ -199,6 +201,7 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 // indirect github.com/k0kubun/pp v3.0.1+incompatible // indirect github.com/kevinburke/go-bindata v3.24.0+incompatible // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect @@ -236,7 +239,6 @@ require ( github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect @@ -258,6 +260,7 @@ require ( github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.2.4 // indirect github.com/onflow/flow-nft/lib/go/templates v1.2.1 // indirect + github.com/onflow/go-ethereum v1.13.4 // indirect github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 // indirect github.com/onflow/sdks v0.6.0-preview.1 // indirect github.com/onflow/wal v1.0.2 // indirect @@ -318,7 +321,7 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -341,42 +344,42 @@ require ( github.com/zeebo/xxh3 v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/mod v0.24.0 
// indirect - golang.org/x/net v0.39.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.32.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.33.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.232.0 // indirect + google.golang.org/api v0.241.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect lukechampine.com/blake3 v1.4.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) replace github.com/onflow/flow-go => ../ diff --git a/integration/go.sum b/integration/go.sum index a0e8aaf72d8..e07f5c7cdb9 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1,35 +1,35 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= -cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= -cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/bigquery v1.66.2 h1:EKOSqjtO7jPpJoEzDmRctGea3c2EOGoexy8VyY9dNro= -cloud.google.com/go/bigquery v1.66.2/go.mod h1:+Yd6dRyW8D/FYEjUGodIbu0QaoEmgav7Lwhotup6njo= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= 
-cloud.google.com/go/datacatalog v1.24.3 h1:3bAfstDB6rlHyK0TvqxEwaeOvoN9UgCs2bn03+VXmss= -cloud.google.com/go/datacatalog v1.24.3/go.mod h1:Z4g33XblDxWGHngDzcpfeOU0b1ERlDPTuQoYG6NkF1s= -cloud.google.com/go/iam v1.5.0 h1:QlLcVMhbLGOjRcGe6VTGGTyQib8dRLK2B/kYNV0+2xs= -cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo= +cloud.google.com/go/bigquery v1.67.0 h1:GXleMyn/cu5+DPLy9Rz5f5IULWTLrepwbQnP/5qrVbY= +cloud.google.com/go/bigquery v1.67.0/go.mod h1:HQeP1AHFuAz0Y55heDSb0cjZIhnEkuwFRBGo6EEKHug= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/datacatalog v1.26.0 h1:eFgygb3DTufTWWUB8ARk+dSuXz+aefNJXTlkWlQcWwE= +cloud.google.com/go/datacatalog v1.26.0/go.mod h1:bLN2HLBAwB3kLTFT5ZKLHVPj/weNz6bR0c7nYp0LE14= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.6 h1:XJNDo5MUfMM05xK3ewpbSdmt7R2Zw+aQEMbdQR65Rbw= -cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -41,11 +41,10 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod 
h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= @@ -71,6 +70,8 @@ github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjC github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= @@ -84,47 +85,47 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0= +github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY= -github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= -github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/config v1.30.0 h1:XhzXYU2x/T441/0CBh0g6UUC/OFGk+FRpl3ThI8AqM8= +github.com/aws/aws-sdk-go-v2/config v1.30.0/go.mod h1:4j78A2ko2xc7SMLjjSUrgpp42vyneH9c8j3emf/CLTo= github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= 
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0 h1:r9W/BX4B1dEbsd2NogyuFXmEfYhdUULUVEOh0SDAovw= +github.com/aws/aws-sdk-go-v2/credentials v1.18.0/go.mod h1:SMtUJQRWEpyfC+ouDJNYdI7NNMqUjHM/Oaf0FV+vWNs= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0 h1:ouCRc4lCriJtCnrIN4Kw2tA/uETRZBrxwb/607gRvkE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.17.0/go.mod h1:LW9/PxQD1SYFC7pnWcgqPhoyZprhjEdg5hBK6qYPLW8= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod 
h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4= github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 h1:cuFWHH87GP1NBGXXfMicUbE7Oty5KpPxN6w4JpmuxYc= +github.com/aws/aws-sdk-go-v2/service/sso v1.26.0/go.mod h1:aJBemdlbCKyOXEXdXBqS7E+8S9XTDcOTaoOjtng54hA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 h1:t2va+wewPOYIqC6XyJ4MGjiGKkczMAPsgq5W4FtL9ME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0/go.mod h1:ExCTcqYqN0hYYRsDlBVU8+68grqlWdgX9/nZJwQW4aY= github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 h1:FD9agdG4CeOGS3ORLByJk56YIXDS7mxFpmZyCtpqExc= +github.com/aws/aws-sdk-go-v2/service/sts v1.35.0/go.mod h1:NDzDPbBF1xtSTZUMuZx0w3hIfWzcL7X2AQ0Tr9becIQ= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -133,8 +134,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= 
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -156,26 +157,32 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= -github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE= +github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= github.com/cockroachdb/tokenbucket 
v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A= -github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= -github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -199,10 +206,10 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= +github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= -github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= -github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -225,7 +232,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= @@ -263,6 +269,8 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= 
+github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -277,15 +285,17 @@ github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJP github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.10 h1:Ppdil79nN+Vc+mXfge0AuUgmKWuVv4eMqzoIVSdqZek= -github.com/ethereum/go-ethereum v1.13.10/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= +github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.2 h1:VDHqj86DaQiMpnMgc7l0rwZTg0FRmlz74yupSG5SnzI= +github.com/ethereum/go-ethereum v1.16.2/go.mod h1:X5CIOyo8SuK1Q5GnaEizQVLHT/DfsiGWuNeVdQcEMNA= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -313,6 +323,8 @@ github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44 github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= @@ -328,8 +340,8 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj github.com/go-git/go-git-fixtures/v4 
v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= @@ -340,8 +352,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -367,16 +379,17 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -402,11 +415,10 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= @@ -441,8 +453,6 @@ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAx github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -452,8 +462,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -509,18 +519,16 @@ github.com/ipfs/go-cid 
v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= +github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b h1:lby3w+96HfyjiFP4ODbcfr4j7pNza7g3XQywnNu+9Mc= -github.com/ipfs/go-ds-pebble v0.3.1-0.20240828032824-d745b9d3200b/go.mod h1:q8icEwk8lSpbPAsa7l9SPm6yt9Z+c4QePFyfKHYmdJw= +github.com/ipfs/go-ds-badger2 v0.1.4 h1:4EDTEDV/Ft/zr5AaJXp2IojvApwevlUD9uahMDONWTE= +github.com/ipfs/go-ds-badger2 v0.1.4/go.mod h1:6WOt9PzJ98Tu7gizJ35NuXDORsYxQ3c4/3gjqF+kq0c= +github.com/ipfs/go-ds-pebble v0.5.0 h1:lXffYCAKVD7nLLPqwJ9D8IxgO7Kz8woiX021tezdsIM= +github.com/ipfs/go-ds-pebble v0.5.0/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= @@ -533,7 +541,6 @@ github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9 github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= @@ -557,6 +564,8 @@ github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU= +github.com/jordanschalm/lockctx 
v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= @@ -593,7 +602,6 @@ github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -696,9 +704,6 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= @@ -760,30 +765,30 @@ github.com/onflow/atree v0.10.0 h1:LFYlRgb0fjs8vezBW/N/tzi+ijLMssjHwIwoV4RwYaA= github.com/onflow/atree v0.10.0/go.mod h1:aqnnE8Os77JiBIeC7UcbeM7N1V3Ys5XWH0CykeMpym0= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= -github.com/onflow/cadence v1.6.0 h1:nFHaEFvekL+9cXuO7w33w6Y7nC1X7PZZHQdSYfE8CvQ= -github.com/onflow/cadence v1.6.0/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= +github.com/onflow/cadence v1.6.4-rc.1-public h1:gIdOGie3gO/4I/I7R6Kou1LeWT9bW1k6XLMpmR/CN6c= +github.com/onflow/cadence v1.6.4-rc.1-public/go.mod h1:MBHOSmj81EtNEGjvYK3UEaFMMrN6jo5wt9U7jvDVLUw= github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U= github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1 h1:n2NTsVT4iH7snqW6nkP1PnxHmgEMCh3XZpbdSIqweO4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.6.1/go.mod h1:3tMXL4npVbk/F1X6SqxZWelQn0pyGvMBVbUXsgvC6Is= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1 h1:Y0bDvS5fTOCrKr7QFl0by3qTq7MFnauVnHoxwW6nQzo= -github.com/onflow/flow-core-contracts/lib/go/templates v1.6.1/go.mod h1:pN768Al/wLRlf3bwugv9TyxniqJxMu4sxnX9eQJam64= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022 h1:RtNoSnri8LwUzlUeffpeBXjazSf7kqhFbHDOAu32bXc= 
+github.com/onflow/flow-core-contracts/lib/go/contracts v1.7.4-0.20250825171851-5a0da78d0022/go.mod h1:/Yne6g7V2Fy1sm/vE78us221bYvVvL5cA8cOzN/uTCI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022 h1:upAfrSTYm/a/Q7UMOE2sqJxsvgOGUkib7In59ogrwDI= +github.com/onflow/flow-core-contracts/lib/go/templates v1.7.2-0.20250825171851-5a0da78d0022/go.mod h1:yBkysayvSKZ/yFO3fEX4YQ/FEZtV6Tnov8ix0lBeiqM= github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM= github.com/onflow/flow-evm-bridge v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk= github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= -github.com/onflow/flow-go-sdk v1.6.0 h1:rN5T5Icva4GjV+RPiUlFm2FMmm0IkQ9f/B8SDlZHRu8= -github.com/onflow/flow-go-sdk v1.6.0/go.mod h1:EBcCMA9Bbjgp/A21i4qCthv9enV4CUYEVZoF8a68vMQ= +github.com/onflow/flow-go-sdk v1.7.0 h1:kSw94LZ+0ppt5ELqzixk7jjzkcrOR0Lh4mOgyu+KTOI= +github.com/onflow/flow-go-sdk v1.7.0/go.mod h1:a5JyRWg1mT6MoixnjTl/E/6AO95u/r2BBy7U/CycvUM= github.com/onflow/flow-nft/lib/go/contracts v1.2.4 h1:gWJgSSgIGo0qWOqr90+khQ69VoYF9vNlqzF+Yh6YYy4= github.com/onflow/flow-nft/lib/go/contracts v1.2.4/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.2.1 h1:SAALMZPDw9Eb9p5kSLnmnFxjyig1MLiT4JUlLp0/bSE= github.com/onflow/flow-nft/lib/go/templates v1.2.1/go.mod h1:W6hOWU0xltPqNpv9gQX8Pj8Jtf0OmRxc1XX2V0kzJaI= -github.com/onflow/flow/protobuf/go/flow v0.4.10 h1:CGEO3n96XZQd/k5HtkZyb90ouem9G+8fNcKyt8s2fvs= -github.com/onflow/flow/protobuf/go/flow v0.4.10/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= -github.com/onflow/go-ethereum v1.15.10 h1:blZBeOLJDOVWqKuhkkMh6S2PKQAJvdgbvOL9ZNggFcU= -github.com/onflow/go-ethereum v1.15.10/go.mod h1:t2nZJtwruVjA5u5yEK8InFzjImFLHrF7ak2bw3E4LDM= +github.com/onflow/flow/protobuf/go/flow v0.4.11 h1:EYjaATmor1ONczbvJ6VejQAJK4elFRk5h9AapxQ7j4c= +github.com/onflow/flow/protobuf/go/flow v0.4.11/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= +github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= @@ -914,6 +919,8 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/psiemens/graceland v1.0.0 h1:L580AVV4Q2XLcPpmvxJRH9UpEAYr/eu2jBKmMglhvM8= github.com/psiemens/graceland 
v1.0.0/go.mod h1:1Tof+vt1LbmcZFE0lzgdwMN0QBymAChG3FRgDx8XisU= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= @@ -1018,8 +1025,9 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= @@ -1116,28 +1124,28 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 
h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1186,8 +1194,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -1205,8 +1213,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 
h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1248,8 +1256,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1268,8 +1276,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1313,7 +1321,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1332,8 +1339,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1344,8 +1351,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1359,14 +1366,14 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1391,8 +1398,8 @@ golang.org/x/tools v0.1.5/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= -golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1404,8 +1411,8 @@ gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.232.0 h1:qGnmaIMf7KcuwHOlF3mERVzChloDYwRfOJOrHt8YC3I= -google.golang.org/api v0.232.0/go.mod h1:p9QCfBWZk1IJETUdbTKloR5ToFdKbYh2fkjsUL6vNoY= +google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE= +google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1420,14 +1427,14 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34 h1:nfEb4Q4usCEhvyA4vmf47NmO3alop2ab5p5gupICWU4= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20250428153025-10db94c68c34/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 h1:h6p3mQqrmT1XkHVTfzLdNz1u7IhINeZkz67/xTbOuWs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= 
+google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 h1:zWFRixYR5QlotL+Uv3YfsPRENIrQFXiGs+iwqel6fOQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1437,8 +1444,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1500,7 +1507,5 @@ lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/integration/testnet/container.go b/integration/testnet/container.go index acb0581984c..fc94dcab586 100644 --- a/integration/testnet/container.go +++ b/integration/testnet/container.go @@ -31,7 +31,7 @@ import ( "github.com/onflow/flow-go/module/metrics" state "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) @@ -98,8 +98,9 @@ func GetPrivateNodeInfoAddress(nodeName string) string { return 
fmt.Sprintf("%s:%d", nodeName, DefaultFlowPort) } -func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey crypto.PrivateKey) ContainerConfig { - info := bootstrap.NewPrivateNodeInfo( +func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey crypto.PrivateKey, +) (ContainerConfig, error) { + info, err := bootstrap.NewPrivateNodeInfo( conf.Identifier, conf.Role, GetPrivateNodeInfoAddress(nodeName), @@ -107,6 +108,9 @@ func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey networkKey, stakingKey, ) + if err != nil { + return ContainerConfig{}, err + } containerConf := ContainerConfig{ NodeInfo: info, @@ -119,7 +123,7 @@ func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey Corrupted: conf.Corrupted, } - return containerConf + return containerConf, nil } // ImageName returns the Docker image name for the given config. @@ -380,32 +384,35 @@ func (c *Container) Connect() error { } func (c *Container) OpenState() (*state.State, error) { - db, err := c.DB() + lockManager := storage.NewTestingLockManager() + badgerdb, err := c.DB() if err != nil { return nil, err } + db := badgerimpl.ToDB(badgerdb) metrics := metrics.NewNoopCollector() - index := storage.NewIndex(metrics, db) - headers := storage.NewHeaders(metrics, db) - seals := storage.NewSeals(metrics, db) - results := storage.NewExecutionResults(metrics, db) - receipts := storage.NewExecutionReceipts(metrics, db, results, storage.DefaultCacheSize) - guarantees := storage.NewGuarantees(metrics, db, storage.DefaultCacheSize) - payloads := storage.NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := storage.NewBlocks(db, headers, payloads) - qcs := storage.NewQuorumCertificates(metrics, db, storage.DefaultCacheSize) - setups := storage.NewEpochSetups(metrics, db) - commits := storage.NewEpochCommits(metrics, db) - protocolState := storage.NewEpochProtocolStateEntries(metrics, setups, commits, db, - storage.DefaultEpochProtocolStateCacheSize, storage.DefaultProtocolStateIndexCacheSize) - protocolKVStates := storage.NewProtocolKVStore(metrics, db, - storage.DefaultProtocolKVStoreCacheSize, storage.DefaultProtocolKVStoreByBlockIDCacheSize) - versionBeacons := store.NewVersionBeacons(badgerimpl.ToDB(db)) + index := store.NewIndex(metrics, db) + headers := store.NewHeaders(metrics, db) + seals := store.NewSeals(metrics, db) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, store.DefaultCacheSize) + guarantees := store.NewGuarantees(metrics, db, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + qcs := store.NewQuorumCertificates(metrics, db, store.DefaultCacheSize) + setups := store.NewEpochSetups(metrics, db) + commits := store.NewEpochCommits(metrics, db) + protocolState := store.NewEpochProtocolStateEntries(metrics, setups, commits, db, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStates := store.NewProtocolKVStore(metrics, db, + store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := store.NewVersionBeacons(db) return state.OpenState( metrics, db, + lockManager, headers, seals, results, diff --git a/integration/testnet/network.go b/integration/testnet/network.go index e60f1ea2c93..be94a08a539 100644 --- a/integration/testnet/network.go +++ 
b/integration/testnet/network.go @@ -16,6 +16,7 @@ import ( "time" "github.com/onflow/flow-go/follower/database" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/protocol_state" "github.com/dapperlabs/testingdock" @@ -52,9 +53,9 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/translator" clusterstate "github.com/onflow/flow-go/state/cluster" - "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + "github.com/onflow/flow-go/storage" badgerstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/utils/io" "github.com/onflow/flow-go/utils/unittest" @@ -678,7 +679,7 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch return flowNetwork } -func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotPath string, followerConf ConsensusFollowerConfig, containers []ContainerConfig) { +func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotPath string, followerConf ConsensusFollowerConfig, _ []ContainerConfig) { tmpdir := makeTempSubDir(t, net.baseTempdir, "flow-consensus-follower") // create a directory for the follower database @@ -705,6 +706,7 @@ func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotP WithValueLogMaxEntries(100000) // Default is 1000000 badgerDB, err := badgerstorage.InitPublic(dbOpts) require.NoError(t, err) + lockManager := storage.NewTestingLockManager() bindAddr := gonet.JoinHostPort("localhost", testingdock.RandomPort(t)) opts := append( @@ -715,6 +717,8 @@ func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotP // denied error. 
consensus_follower.WithPebbleDB(pebbleDB),
 		consensus_follower.WithBootstrapDir(followerBootstrapDir),
+		// each consensus follower will have a different lock manager singleton
+		consensus_follower.WithLockManager(lockManager),
 	)

 	stakedANContainer := net.ContainerByID(followerConf.StakedNodeID)
@@ -1054,7 +1058,7 @@ func followerNodeInfos(confs []ConsensusFollowerConfig) ([]bootstrap.NodeInfo, e
 	dummyStakingKey := unittest.StakingPrivKeyFixture()

 	for _, conf := range confs {
-		info := bootstrap.NewPrivateNodeInfo(
+		info, err := bootstrap.NewPrivateNodeInfo(
 			conf.NodeID,
 			flow.RoleAccess, // use Access role
 			"",              // no address
@@ -1062,6 +1066,9 @@ func followerNodeInfos(confs []ConsensusFollowerConfig) ([]bootstrap.NodeInfo, e
 			conf.NetworkingPrivKey,
 			dummyStakingKey,
 		)
+		if err != nil {
+			return nil, err
+		}
 		nodeInfos = append(nodeInfos, info)
 	}

@@ -1195,33 +1202,46 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl
 	targetDuration := networkConf.ViewsInEpoch / networkConf.ViewsPerSecond

 	// generate epoch service events
-	epochSetup := &flow.EpochSetup{
-		Counter:            epochCounter,
-		FirstView:          rootHeader.View,
-		DKGPhase1FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase,
-		DKGPhase2FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*2,
-		DKGPhase3FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*3,
-		FinalView:          rootHeader.View + networkConf.ViewsInEpoch - 1,
-		Participants:       participants.ToSkeleton(),
-		Assignments:        clusterAssignments,
-		RandomSource:       randomSource,
-		TargetDuration:     targetDuration,
-		TargetEndTime:      uint64(time.Now().Unix()) + targetDuration,
-	}
-
-	epochCommit := &flow.EpochCommit{
-		Counter:            epochCounter,
-		ClusterQCs:         flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs),
-		DKGGroupKey:        dkg.PubGroupKey,
-		DKGParticipantKeys: dkg.PubKeyShares,
-		DKGIndexMap:        dkgIndexMap,
+	epochSetup, err := flow.NewEpochSetup(
+		flow.UntrustedEpochSetup{
+			Counter:            epochCounter,
+			FirstView:          rootHeader.View,
+			DKGPhase1FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase,
+			DKGPhase2FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*2,
+			DKGPhase3FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*3,
+			FinalView:          rootHeader.View + networkConf.ViewsInEpoch - 1,
+			Participants:       participants.ToSkeleton(),
+			Assignments:        clusterAssignments,
+			RandomSource:       randomSource,
+			TargetDuration:     targetDuration,
+			TargetEndTime:      uint64(time.Now().Unix()) + targetDuration,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch setup: %w", err)
+	}
+
+	epochCommit, err := flow.NewEpochCommit(
+		flow.UntrustedEpochCommit{
+			Counter:            epochCounter,
+			ClusterQCs:         flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs),
+			DKGGroupKey:        dkg.PubGroupKey,
+			DKGParticipantKeys: dkg.PubKeyShares,
+			DKGIndexMap:        dkgIndexMap,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch commit: %w", err)
 	}
+
 	root := &flow.Block{
 		Header: rootHeader,
 	}
-	rootProtocolState, err := networkConf.KVStoreFactory(
-		inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit).ID(),
-	)
+	minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch protocol state: %w", err)
+	}
+	rootProtocolState, err := networkConf.KVStoreFactory(minEpochStateEntry.ID())
 	if err != nil {
 		return nil, err
 	}
@@ -1260,7 +1280,7 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl
fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), fvm.WithRootBlock(root.Header), fvm.WithEpochConfig(epochConfig), - fvm.WithIdentities(participants), + fvm.WithNodes(stakedNodeInfos), ) if err != nil { return nil, err @@ -1302,7 +1322,7 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl return nil, fmt.Errorf("could not create bootstrap state snapshot: %w", err) } - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err != nil { return nil, fmt.Errorf("invalid root snapshot qcs: %w", err) } @@ -1347,7 +1367,7 @@ func setupKeys(networkConf NetworkConfig) ([]ContainerConfig, error) { addr := fmt.Sprintf("%s:%d", name, DefaultFlowPort) roleCounter[conf.Role]++ - info := bootstrap.NewPrivateNodeInfo( + info, err := bootstrap.NewPrivateNodeInfo( conf.Identifier, conf.Role, addr, @@ -1355,6 +1375,9 @@ func setupKeys(networkConf NetworkConfig) ([]ContainerConfig, error) { networkKeys[i], stakingKeys[i], ) + if err != nil { + return nil, err + } containerConf := ContainerConfig{ NodeInfo: info, diff --git a/integration/testnet/util.go b/integration/testnet/util.go index c48d9bc0afd..f8e619e1bec 100644 --- a/integration/testnet/util.go +++ b/integration/testnet/util.go @@ -171,7 +171,7 @@ func WriteTestExecutionService(_ flow.Identifier, address, observerName, bootstr log.Info().Msgf("test execution node private key: %v, public key: %x, peerID: %v, nodeID: %v", networkKey, k, peerID, nodeID) - nodeInfo := bootstrap.NewPrivateNodeInfo( + nodeInfo, err := bootstrap.NewPrivateNodeInfo( nodeID, flow.RoleExecution, address, @@ -179,6 +179,9 @@ func WriteTestExecutionService(_ flow.Identifier, address, observerName, bootstr networkKey, stakingKey, ) + if err != nil { + return bootstrap.NodeInfo{}, fmt.Errorf("failed to create node info: %w", err) + } path := fmt.Sprintf("%s/private-root-information/private-node-info_%v/%vjson", bootstrapDir, nodeID, bootstrap.PathPrivNodeInfoPrefix) diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go index 6470ced35b2..7aefc0532bf 100644 --- a/integration/tests/access/cohort1/access_api_test.go +++ b/integration/tests/access/cohort1/access_api_test.go @@ -15,6 +15,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" "github.com/onflow/flow-go/integration/tests/mvp" "github.com/onflow/flow-go/utils/dsl" @@ -28,7 +29,6 @@ import ( sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" - "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/integration/utils" @@ -115,8 +115,8 @@ func (s *AccessAPISuite) SetupTest() { flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel), // make sure test continues to test as expected if the default config changes - testnet.WithAdditionalFlagf("--script-execution-mode=%s", backend.IndexQueryModeExecutionNodesOnly), - testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", backend.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), ) indexingAccessConfig := testnet.NewNodeConfig( @@ -127,7 +127,7 @@ func (s *AccessAPISuite) SetupTest() { 
testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), - testnet.WithAdditionalFlagf("--script-execution-mode=%s", backend.IndexQueryModeLocalOnly), + testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeLocalOnly), ) consensusConfigs := []func(config *testnet.NodeConfig){ diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index cc2709f9780..b396fb9fd5a 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -18,7 +18,7 @@ import ( sdkcrypto "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go-sdk/templates" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/integration/tests/lib" @@ -97,8 +97,8 @@ func (s *ObserverIndexerEnabledSuite) SetupTest() { testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithAdditionalFlag("--supports-observer=true"), testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), - testnet.WithAdditionalFlagf("--script-execution-mode=%s", backend.IndexQueryModeExecutionNodesOnly), - testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", backend.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), ), diff --git a/integration/tests/access/cohort4/access_test.go b/integration/tests/access/cohort4/access_test.go index 22f2e9d4942..5f059783f2c 100644 --- a/integration/tests/access/cohort4/access_test.go +++ b/integration/tests/access/cohort4/access_test.go @@ -6,10 +6,6 @@ import ( "testing" "time" - "github.com/onflow/flow-go/consensus/hotstuff/committees" - "github.com/onflow/flow-go/consensus/hotstuff/signature" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,6 +15,9 @@ import ( accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow-go/consensus/hotstuff/committees" + "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -56,28 +55,14 @@ func (s *AccessSuite) SetupTest() { nodeConfigs := []testnet.NodeConfig{ testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel)), - } - - // need one dummy execution node (unused ghost) - exeConfig := testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, exeConfig) - - // need one dummy verification node (unused ghost) - verConfig := testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, 
verConfig) - - // need one controllable collection node (unused ghost) - collConfig := testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, collConfig) - - // need three consensus nodes (unused ghost) - for n := 0; n < 3; n++ { - conID := unittest.IdentifierFixture() - nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus, - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithID(conID), - testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, nodeConfig) + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), } conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs) @@ -90,8 +75,14 @@ func (s *AccessSuite) SetupTest() { s.net.Start(s.ctx) } -func (s *AccessSuite) TestAPIsAvailable() { +func (s *AccessSuite) TestAllTheThings() { + s.runTestAPIsAvailable() + // run this test last because it stops the container + s.runTestSignerIndicesDecoding() +} + +func (s *AccessSuite) runTestAPIsAvailable() { s.T().Run("TestHTTPProxyPortOpen", func(t *testing.T) { httpProxyAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCWebPort) @@ -106,22 +97,22 @@ func (s *AccessSuite) TestAPIsAvailable() { defer cancel() grpcAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort) - conn, err := grpc.DialContext(ctx, grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err, "failed to connect to access node") defer conn.Close() client := accessproto.NewAccessAPIClient(conn) - _, err = client.Ping(s.ctx, &accessproto.PingRequest{}) + _, err = client.Ping(ctx, &accessproto.PingRequest{}) assert.NoError(t, err, "failed to ping access node") }) } -// TestSignerIndicesDecoding tests that access node uses signer indices' decoder to correctly parse encoded data in blocks. +// runTestSignerIndicesDecoding tests that access node uses signer indices' decoder to correctly parse encoded data in blocks. // This test receives blocks from consensus follower and then requests same blocks from access API and checks if returned data // matches. -func (s *AccessSuite) TestSignerIndicesDecoding() { - +// CAUTION: must be run last if running multiple tests using the same network since it stops the containers. 
+func (s *AccessSuite) runTestSignerIndicesDecoding() {
 	container := s.net.ContainerByName(testnet.PrimaryAN)

 	ctx, cancel := context.WithCancel(s.ctx)
@@ -129,28 +120,32 @@ func (s *AccessSuite) TestSignerIndicesDecoding() {

 	// create access API
 	grpcAddress := container.Addr(testnet.GRPCPort)
-	conn, err := grpc.DialContext(ctx, grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	conn, err := grpc.NewClient(grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
 	require.NoError(s.T(), err, "failed to connect to access node")
 	defer conn.Close()

 	client := accessproto.NewAccessAPIClient(conn)

-	// query latest finalized block
-	latestFinalizedBlock, err := MakeApiRequest(client.GetLatestBlockHeader, ctx, &accessproto.GetLatestBlockHeaderRequest{
-		IsSealed: false,
-	})
-	require.NoError(s.T(), err)
-
+	// query latest finalized block. wait until at least two blocks have been finalized.
+	// otherwise, we may get the root block which does not have any voter indices or its
+	// immediate child whose parent voter indices are empty.
+	var latestFinalizedBlock *accessproto.BlockHeaderResponse
+	require.Eventually(s.T(), func() bool {
+		latestFinalizedBlock, err = MakeApiRequest(client.GetLatestBlockHeader, ctx, &accessproto.GetLatestBlockHeaderRequest{
+			IsSealed: false,
+		})
+		require.NoError(s.T(), err)
+		return latestFinalizedBlock.GetBlock().Height > 1
+	}, 30*time.Second, 100*time.Millisecond)
+
+	// verify we get the same block when querying by ID and height
 	blockByID, err := MakeApiRequest(client.GetBlockHeaderByID, ctx, &accessproto.GetBlockHeaderByIDRequest{Id: latestFinalizedBlock.Block.Id})
 	require.NoError(s.T(), err)
-
 	require.Equal(s.T(), latestFinalizedBlock, blockByID, "expect to receive same block by ID")

-	blockByHeight, err := MakeApiRequest(client.GetBlockHeaderByHeight, ctx,
-		&accessproto.GetBlockHeaderByHeightRequest{Height: latestFinalizedBlock.Block.Height})
+	blockByHeight, err := MakeApiRequest(client.GetBlockHeaderByHeight, ctx, &accessproto.GetBlockHeaderByHeightRequest{Height: latestFinalizedBlock.Block.Height})
 	require.NoError(s.T(), err)
-
-	require.Equal(s.T(), blockByID, blockByHeight, "expect to receive same block by height")
+	require.Equal(s.T(), latestFinalizedBlock, blockByHeight, "expect to receive same block by height")

 	// stop container, so we can access its state and perform assertions
 	err = s.net.StopContainerByName(ctx, testnet.PrimaryAN)
diff --git a/integration/tests/access/cohort4/execution_data_pruning_test.go b/integration/tests/access/cohort4/execution_data_pruning_test.go
index 1d231a68829..3b5aeb185ee 100644
--- a/integration/tests/access/cohort4/execution_data_pruning_test.go
+++ b/integration/tests/access/cohort4/execution_data_pruning_test.go
@@ -18,7 +18,9 @@ import (
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/storage/badger"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/badgerimpl"
+	"github.com/onflow/flow-go/storage/store"
 	"github.com/onflow/flow-go/utils/unittest"

 	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
@@ -162,9 +164,9 @@ func (s *ExecutionDataPruningSuite) TestHappyPath() {
 	// setup storage objects needed to get the execution data id
 	anDB, err := accessNode.DB()
 	require.NoError(s.T(), err, "could not open db")
-
-	anHeaders := badger.NewHeaders(metrics, anDB)
-	anResults := 
badger.NewExecutionResults(metrics, anDB) + db := badgerimpl.ToDB(anDB) + anHeaders := store.NewHeaders(metrics, db) + anResults := store.NewExecutionResults(metrics, db) // start an execution data service using the Observer Node's execution data db @@ -173,7 +175,7 @@ func (s *ExecutionDataPruningSuite) TestHappyPath() { onDB, err := observerNode.DB() require.NoError(s.T(), err, "could not open db") - onResults := badger.NewExecutionResults(metrics, onDB) + onResults := store.NewExecutionResults(metrics, badgerimpl.ToDB(onDB)) s.checkResults(anHeaders, anResults, onResults, anEds, onEds) } @@ -233,9 +235,9 @@ func (s *ExecutionDataPruningSuite) waitUntilExecutionDataForBlockIndexed(waitin // checkResults checks the results of execution data pruning to ensure correctness. func (s *ExecutionDataPruningSuite) checkResults( - headers *badger.Headers, - anResults *badger.ExecutionResults, - onResults *badger.ExecutionResults, + headers storage.Headers, + anResults storage.ExecutionResults, + onResults storage.ExecutionResults, anEds execution_data.ExecutionDataStore, onEds execution_data.ExecutionDataStore, ) { diff --git a/integration/tests/collection/suite.go b/integration/tests/collection/suite.go index 393e14026d9..be538fccc35 100644 --- a/integration/tests/collection/suite.go +++ b/integration/tests/collection/suite.go @@ -22,6 +22,7 @@ import ( "github.com/onflow/flow-go/model/messages" clusterstate "github.com/onflow/flow-go/state/cluster" clusterstateimpl "github.com/onflow/flow-go/state/cluster/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -351,7 +352,7 @@ func (suite *CollectorSuite) ClusterStateFor(id flow.Identifier) *clusterstateim rootQC := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(rootBlock.ID())) clusterStateRoot, err := clusterstateimpl.NewStateRoot(rootBlock, rootQC, setup.Counter) suite.NoError(err) - clusterState, err := clusterstateimpl.OpenState(db, nil, nil, nil, clusterStateRoot.ClusterID(), clusterStateRoot.EpochCounter()) + clusterState, err := clusterstateimpl.OpenState(badgerimpl.ToDB(db), nil, nil, nil, clusterStateRoot.ClusterID(), clusterStateRoot.EpochCounter()) require.NoError(suite.T(), err, "could not get cluster state") return clusterState diff --git a/integration/tests/epochs/dynamic_epoch_transition_suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go index 76ee38a2658..8937fca0eb9 100644 --- a/integration/tests/epochs/dynamic_epoch_transition_suite.go +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -106,6 +106,9 @@ func (s *DynamicEpochTransitionSuite) StakeNode(ctx context.Context, env templat latestBlockID, err := s.Client.GetLatestBlockID(ctx) require.NoError(s.T(), err) + stakingKeyPoP, err := crypto.BLSGeneratePOP(stakingKey) + require.NoError(s.T(), err) + // create and register node tx, err := utils.MakeCreateAndSetupNodeTx( env, @@ -118,6 +121,7 @@ func (s *DynamicEpochTransitionSuite) StakeNode(ctx context.Context, env templat testnet.GetPrivateNodeInfoAddress(containerName), strings.TrimPrefix(networkingKey.PublicKey().String(), "0x"), strings.TrimPrefix(stakingKey.PublicKey().String(), "0x"), + strings.TrimPrefix(stakingKeyPoP.String(), "0x"), machineAccountPubKey, ) require.NoError(s.T(), err) @@ -165,13 +169,13 @@ func (s *DynamicEpochTransitionSuite) generateAccountKeys(role flow.Role) ( machineAccountKey crypto.PrivateKey, machineAccountPubKey *sdk.AccountKey, ) { - operatorAccountKey = 
unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + operatorAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256) networkingKey = unittest.NetworkingPrivKeyFixture() stakingKey = unittest.StakingPrivKeyFixture() // create a machine account if role == flow.RoleConsensus || role == flow.RoleCollection { - machineAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + machineAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256) machineAccountPubKey = &sdk.AccountKey{ PublicKey: machineAccountKey.PublicKey(), @@ -303,8 +307,9 @@ func (s *DynamicEpochTransitionSuite) NewTestContainerOnNetwork(role flow.Role, } nodeConfig := testnet.NewNodeConfig(role, containerConfigs...) - testContainerConfig := testnet.NewContainerConfig(info.ContainerName, nodeConfig, info.NetworkingKey, info.StakingKey) - err := testContainerConfig.WriteKeyFiles(s.Net.BootstrapDir, info.MachineAccountAddress, encodable.MachineAccountPrivKey{PrivateKey: info.MachineAccountKey}, role) + testContainerConfig, err := testnet.NewContainerConfig(info.ContainerName, nodeConfig, info.NetworkingKey, info.StakingKey) + require.NoError(s.T(), err) + err = testContainerConfig.WriteKeyFiles(s.Net.BootstrapDir, info.MachineAccountAddress, encodable.MachineAccountPrivKey{PrivateKey: info.MachineAccountKey}, role) require.NoError(s.T(), err) //add our container to the network diff --git a/integration/utils/templates/create-and-setup-node.cdc b/integration/utils/templates/create-and-setup-node.cdc index b1ae1faacb1..8f22579a00a 100644 --- a/integration/utils/templates/create-and-setup-node.cdc +++ b/integration/utils/templates/create-and-setup-node.cdc @@ -12,6 +12,7 @@ transaction( networkingAddress: String, networkingKey: String, stakingKey: String, + stakingKeyPoP: String, machineAcctKey: Crypto.KeyListEntry?) { prepare(service: auth(BorrowValue) &Account) { @@ -62,6 +63,7 @@ transaction( networkingAddress: networkingAddress, networkingKey: networkingKey, stakingKey: stakingKey, + stakingKeyPoP: stakingKeyPoP, amount: stake, payer: service, ) { diff --git a/integration/utils/transactions.go b/integration/utils/transactions.go index 6edd8d510ce..ec83807e3e5 100644 --- a/integration/utils/transactions.go +++ b/integration/utils/transactions.go @@ -54,6 +54,7 @@ func MakeCreateAndSetupNodeTx( networkingAddress string, networkingKey string, stakingKey string, + stakingKeyPoP string, machineKey *sdk.AccountKey, ) ( *sdk.Transaction, @@ -136,6 +137,16 @@ func MakeCreateAndSetupNodeTx( return nil, err } + // 7 - staking key PoP + stakingKeyPoPCDC, err := cadence.NewString(stakingKeyPoP) + if err != nil { + return nil, err + } + err = tx.AddArgument(stakingKeyPoPCDC) + if err != nil { + return nil, err + } + if machineKey != nil { // for collection/consensus nodes, register the machine account key cdcMachineAcctKey, err := sdktemplates.AccountKeyToCadenceCryptoKey(machineKey) @@ -220,7 +231,7 @@ func MakeSetProtocolStateVersionTx( // This ensures a single transaction can be sealed by the network. func CreateFlowAccount(ctx context.Context, client *testnet.Client) (sdk.Address, error) { fullAccountKey := sdk.NewAccountKey(). - SetPublicKey(unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen).PublicKey()). + SetPublicKey(unittest.PrivateKeyFixture(crypto.ECDSAP256).PublicKey()). SetHashAlgo(sdkcrypto.SHA2_256). 
SetWeight(sdk.AccountKeyWeightThreshold) diff --git a/model/bootstrap/node_info.go b/model/bootstrap/node_info.go index e544d6b2da0..24da2e19b20 100644 --- a/model/bootstrap/node_info.go +++ b/model/bootstrap/node_info.go @@ -159,6 +159,7 @@ type NodeInfoPub struct { Weight uint64 NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP encodable.StakingKeyPoP } // decodableNodeInfoPub provides backward-compatible decoding of old models @@ -170,6 +171,7 @@ type decodableNodeInfoPub struct { Weight uint64 NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP encodable.StakingKeyPoP // Stake previously was used in place of the Weight field. // Deprecated: supported in decoding for backward-compatibility Stake uint64 @@ -206,6 +208,7 @@ func (info *NodeInfoPub) UnmarshalJSON(b []byte) error { info.Weight = decodable.Weight info.NetworkPubKey = decodable.NetworkPubKey info.StakingPubKey = decodable.StakingPubKey + info.StakingPoP = decodable.StakingPoP return nil } @@ -223,6 +226,16 @@ type NodePrivateKeys struct { // A NodeInfo instance can contain EITHER public keys OR private keys, not both. // This can be ensured by using only using the provided constructors and NOT // manually constructing an instance. +// +// Deprecated: There is a concern about the current usage pattern of `NodeInfo`. +// There are no build-time enforcements of using `NodeInfo` for either the private or public usage. The struct +// can mistakenly be used for both cases. Other than introducing a confusing design, developers can accidentally +// confuse the private usage as a public one, for instance by writing the private info (including the private +// keys) into a file that is publicly shared. +// There is an ongoing attempt to replace `NodeInfo` by the explicit structures `NodeInfoPriv` and `NodeInfoPub` +// in https://github.com/onflow/flow-go/pull/7476. +// It is recommended not to use `NodeInfo` in new code development, in order to limit the structure's usage, and to +// use `NodeInfoPriv` and `NodeInfoPub` instead. type NodeInfo struct { // NodeID is the unique identifier of the node in the network @@ -239,11 +252,17 @@ type NodeInfo struct { // Weight is the weight of the node Weight uint64 - // key information is private - networkPubKey crypto.PublicKey + // PRIVATE Variant: networkPrivKey crypto.PrivateKey - stakingPubKey crypto.PublicKey stakingPrivKey crypto.PrivateKey + + // By convention, `NodeInfo` must either include the public fields and exclude the private fields, or + // vice versa. Mixtures are not allowed. Please check function [NodeInfoType] for the precise convention.
+ // + // PUBLIC Variant: + networkPubKey crypto.PublicKey + stakingPubKey crypto.PublicKey + stakingPoP crypto.Signature } func NewPublicNodeInfo( @@ -253,6 +272,7 @@ func NewPublicNodeInfo( weight uint64, networkKey crypto.PublicKey, stakingKey crypto.PublicKey, + stakingPoP crypto.Signature, ) NodeInfo { return NodeInfo{ NodeID: nodeID, @@ -261,6 +281,7 @@ func NewPublicNodeInfo( Weight: weight, networkPubKey: networkKey, stakingPubKey: stakingKey, + stakingPoP: stakingPoP, } } @@ -271,7 +292,12 @@ func NewPrivateNodeInfo( weight uint64, networkKey crypto.PrivateKey, stakingKey crypto.PrivateKey, -) NodeInfo { +) (NodeInfo, error) { + pop, err := crypto.BLSGeneratePOP(stakingKey) + if err != nil { + return NodeInfo{}, fmt.Errorf("failed to generate PoP: %w", err) + } + return NodeInfo{ NodeID: nodeID, Role: role, @@ -281,7 +307,8 @@ func NewPrivateNodeInfo( stakingPrivKey: stakingKey, networkPubKey: networkKey.PublicKey(), stakingPubKey: stakingKey.PublicKey(), - } + stakingPoP: pop, + }, nil } // Type returns the type of the node info instance. @@ -289,7 +316,7 @@ func (node NodeInfo) Type() NodeInfoType { if node.networkPrivKey != nil && node.stakingPrivKey != nil { return NodeInfoTypePrivate } - if node.networkPubKey != nil && node.stakingPubKey != nil { + if node.networkPubKey != nil && node.stakingPubKey != nil && node.stakingPoP != nil { return NodeInfoTypePublic } return NodeInfoTypeInvalid @@ -309,6 +336,17 @@ func (node NodeInfo) StakingPubKey() crypto.PublicKey { return node.stakingPrivKey.PublicKey() } +func (node NodeInfo) StakingPoP() (crypto.Signature, error) { + if node.stakingPoP != nil { + return node.stakingPoP, nil + } + pop, err := crypto.BLSGeneratePOP(node.stakingPrivKey) + if err != nil { + return nil, fmt.Errorf("staking PoP generation failed: %w", err) + } + return pop, nil +} + func (node NodeInfo) PrivateKeys() (*NodePrivateKeys, error) { if node.Type() != NodeInfoTypePrivate { return nil, ErrMissingPrivateInfo @@ -334,8 +372,17 @@ func (node NodeInfo) Private() (NodeInfoPriv, error) { }, nil } -// Public returns the canonical public encodable structure -func (node NodeInfo) Public() NodeInfoPub { +// Public returns the canonical encodable structure holding the node's public information. +// It derives the networking and staking public keys, as well as the Proof of Possession (PoP) of the staking private key +// if they are not already provided in the NodeInfo. +// +// It errors, if there is a problem generating the staking key PoP. +func (node NodeInfo) Public() (NodeInfoPub, error) { + stakingPoP, err := node.StakingPoP() + if err != nil { + return NodeInfoPub{}, fmt.Errorf("failed to generate staking PoP: %w", err) + } + return NodeInfoPub{ Role: node.Role, Address: node.Address, @@ -343,18 +390,25 @@ func (node NodeInfo) Public() NodeInfoPub { Weight: node.Weight, NetworkPubKey: encodable.NetworkPubKey{PublicKey: node.NetworkPubKey()}, StakingPubKey: encodable.StakingPubKey{PublicKey: node.StakingPubKey()}, - } + StakingPoP: encodable.StakingKeyPoP{Signature: stakingPoP}, + }, nil } // PartnerPublic returns the public data for a partner node. 
-func (node NodeInfo) PartnerPublic() PartnerNodeInfoPub { +func (node NodeInfo) PartnerPublic() (PartnerNodeInfoPub, error) { + + stakingPoP, err := node.StakingPoP() + if err != nil { + return PartnerNodeInfoPub{}, fmt.Errorf("failed to generate staking PoP: %w", err) + } return PartnerNodeInfoPub{ Role: node.Role, Address: node.Address, NodeID: node.NodeID, NetworkPubKey: encodable.NetworkPubKey{PublicKey: node.NetworkPubKey()}, StakingPubKey: encodable.StakingPubKey{PublicKey: node.StakingPubKey()}, - } + StakingPoP: stakingPoP, + }, nil } // Identity returns the node info as a public Flow identity. @@ -375,18 +429,9 @@ func (node NodeInfo) Identity() *flow.Identity { return identity } -// NodeInfoFromIdentity converts an identity to a public NodeInfo -func NodeInfoFromIdentity(identity *flow.Identity) NodeInfo { - return NewPublicNodeInfo( - identity.NodeID, - identity.Role, - identity.Address, - identity.InitialWeight, - identity.NetworkPubKey, - identity.StakingPubKey) -} - -func PrivateNodeInfoFromIdentity(identity *flow.Identity, networkKey, stakingKey crypto.PrivateKey) NodeInfo { +// PrivateNodeInfoFromIdentity builds a NodeInfo from a flow Identity. +// WARNING: Nothing enforces that the output NodeInfo's keys are corresponding to the input Identity. +func PrivateNodeInfoFromIdentity(identity *flow.Identity, networkKey, stakingKey crypto.PrivateKey) (NodeInfo, error) { return NewPrivateNodeInfo( identity.NodeID, identity.Role, @@ -428,10 +473,14 @@ func ToIdentityList(nodes []NodeInfo) flow.IdentityList { return il } -func ToPublicNodeInfoList(nodes []NodeInfo) []NodeInfoPub { +func ToPublicNodeInfoList(nodes []NodeInfo) ([]NodeInfoPub, error) { pub := make([]NodeInfoPub, 0, len(nodes)) for _, node := range nodes { - pub = append(pub, node.Public()) + info, err := node.Public() + if err != nil { + return nil, fmt.Errorf("could not read public info: %w", err) + } + pub = append(pub, info) } - return pub + return pub, nil } diff --git a/model/bootstrap/node_info_test.go b/model/bootstrap/node_info_test.go index 635826dd43c..4c67b4bf81c 100644 --- a/model/bootstrap/node_info_test.go +++ b/model/bootstrap/node_info_test.go @@ -66,7 +66,8 @@ func TestNodeConfigEncodingJSON(t *testing.T) { func TestNodeInfoPubEncodingJSON(t *testing.T) { t.Run("normal node info", func(t *testing.T) { - conf := unittest.NodeInfoFixture().Public() + conf, err := unittest.NodeInfoFixture().Public() + require.NoError(t, err) enc, err := json.Marshal(conf) require.NoError(t, err) var dec bootstrap.NodeInfoPub @@ -75,7 +76,8 @@ func TestNodeInfoPubEncodingJSON(t *testing.T) { assert.True(t, dec.Equals(&conf)) }) t.Run("compat: should accept old files using Stake field", func(t *testing.T) { - conf := unittest.NodeInfoFixture().Public() + conf, err := unittest.NodeInfoFixture().Public() + require.NoError(t, err) enc, err := json.Marshal(conf) require.NoError(t, err) // emulate the old encoding by replacing the new field with old field name diff --git a/model/bootstrap/partner_nodes.go.go b/model/bootstrap/partner_nodes.go.go index a65f09d2e18..36e5c9cc41a 100644 --- a/model/bootstrap/partner_nodes.go.go +++ b/model/bootstrap/partner_nodes.go.go @@ -14,4 +14,5 @@ type PartnerNodeInfoPub struct { NodeID flow.Identifier NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP []byte } diff --git a/model/chunks/chunkLocator.go b/model/chunks/chunkLocator.go index f8791f668d7..6152e74cfc9 100644 --- a/model/chunks/chunkLocator.go +++ b/model/chunks/chunkLocator.go @@ -21,6 +21,20 
@@ func (c Locator) Checksum() flow.Identifier { return flow.MakeID(c) } +// EqualTo returns true if the two Locators are equivalent. +func (c *Locator) EqualTo(other *Locator) bool { + // Shortcut if `c` and `other` point to the same object; covers case where both are nil. + if c == other { + return true + } + if c == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + + return c.ResultID == other.ResultID && + c.Index == other.Index +} + // ChunkLocatorID is a util function that returns identifier of corresponding chunk locator to // the specified result and chunk index. func ChunkLocatorID(resultID flow.Identifier, chunkIndex uint64) flow.Identifier { diff --git a/model/chunks/chunkLocator_test.go b/model/chunks/chunkLocator_test.go index 46df3d7462f..0be6fbcb1ad 100644 --- a/model/chunks/chunkLocator_test.go +++ b/model/chunks/chunkLocator_test.go @@ -1,10 +1,12 @@ package chunks_test import ( + "math/rand" "testing" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/utils/unittest" ) @@ -22,3 +24,55 @@ func TestChunkLocatorConvert(t *testing.T) { convertedList := locatorMap.ToList() require.ElementsMatch(t, originalList, convertedList) } + +// TestLocator_EqualTo verifies the correctness of the EqualTo method on Locator. +// It checks that Locators are considered equal if and only if all fields match. +func TestLocator_EqualTo(t *testing.T) { + loc1 := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + loc2 := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 1) + + require.False(t, loc1.EqualTo(loc2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to gradually make loc1 equal to loc2 + mutations := []func(){ + func() { + loc1.ResultID = loc2.ResultID + }, + func() { + loc1.Index = loc2.Index + }, + } + + // Shuffle mutation order + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, loc1.EqualTo(loc2)) + } + + // Final mutation: should now be equal + mutations[len(mutations)-1]() + require.True(t, loc1.EqualTo(loc2)) +} + +// TestLocator_EqualTo_Nil verifies the behavior of EqualTo when one or both inputs are nil.
+func TestLocator_EqualTo_Nil(t *testing.T) { + var nilLoc *chunks.Locator + nonNil := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilLoc.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilLoc)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilLoc.EqualTo(nil)) + }) +} diff --git a/model/convert/service_event.go b/model/convert/service_event.go index 514188ee805..92a1bdc3566 100644 --- a/model/convert/service_event.go +++ b/model/convert/service_event.go @@ -152,20 +152,9 @@ func convertServiceEventEpochSetup(event flow.Event) (*flow.ServiceEvent, error) return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup := &flow.EpochSetup{ - Counter: uint64(counter), - FirstView: uint64(firstView), - FinalView: uint64(finalView), - DKGPhase1FinalView: uint64(dkgPhase1FinalView), - DKGPhase2FinalView: uint64(dkgPhase2FinalView), - DKGPhase3FinalView: uint64(dkgPhase3FinalView), - TargetDuration: uint64(targetDuration), - TargetEndTime: uint64(targetEndTimeUnix), - } - // random source from the event must be a hex string // containing exactly 128 bits (equivalent to 16 bytes or 32 hex characters) - setup.RandomSource, err = hex.DecodeString(string(randomSrcHex)) + randomSource, err := hex.DecodeString(string(randomSrcHex)) if err != nil { return nil, fmt.Errorf( "could not decode random source hex (%v): %w", @@ -174,25 +163,35 @@ func convertServiceEventEpochSetup(event flow.Event) (*flow.ServiceEvent, error) ) } - if len(setup.RandomSource) != flow.EpochSetupRandomSourceLength { - return nil, fmt.Errorf( - "random source in epoch setup event must be of (%d) bytes, got (%d)", - flow.EpochSetupRandomSourceLength, - len(setup.RandomSource), - ) - } - // parse cluster assignments; returned assignments are in canonical order - setup.Assignments, err = convertClusterAssignments(cdcClusters.Values) + assignments, err := convertClusterAssignments(cdcClusters.Values) if err != nil { return nil, fmt.Errorf("could not convert cluster assignments: %w", err) } // parse epoch participants; returned node identities are in canonical order - setup.Participants, err = convertParticipants(cdcParticipants.Values) + participants, err := convertParticipants(cdcParticipants.Values) if err != nil { return nil, fmt.Errorf("could not convert participants: %w", err) } + setup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: uint64(counter), + FirstView: uint64(firstView), + DKGPhase1FinalView: uint64(dkgPhase1FinalView), + DKGPhase2FinalView: uint64(dkgPhase2FinalView), + DKGPhase3FinalView: uint64(dkgPhase3FinalView), + FinalView: uint64(finalView), + Participants: participants, + Assignments: assignments, + RandomSource: randomSource, + TargetDuration: uint64(targetDuration), + TargetEndTime: uint64(targetEndTimeUnix), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch setup: %w", err) + } // construct the service event serviceEvent := &flow.ServiceEvent{ @@ -296,24 +295,20 @@ func convertServiceEventEpochCommitV1(event flow.Event) (*flow.ServiceEvent, err return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) } - commit := &flow.EpochCommit{ - Counter: uint64(counter), - } - // parse cluster qc votes - commit.ClusterQCs, err = convertClusterQCVotes(cdcClusterQCVotes.Values) + clusterQCs, err := convertClusterQCVotes(cdcClusterQCVotes.Values) if err != nil { return nil, fmt.Errorf("could 
not convert cluster qc votes: %w", err) } // parse DKG participants - commit.DKGParticipantKeys, err = convertDKGKeys(cdcDKGKeys.Values) + dKGParticipantKeys, err := convertDKGKeys(cdcDKGKeys.Values) if err != nil { return nil, fmt.Errorf("could not convert Random Beacon keys: %w", err) } // parse DKG group key - commit.DKGGroupKey, err = convertDKGKey(cdcDKGGroupKey) + dKGGroupKey, err := convertDKGKey(cdcDKGGroupKey) if err != nil { return nil, fmt.Errorf("could not convert Random Beacon group key: %w", err) } @@ -340,25 +335,27 @@ func convertServiceEventEpochCommitV1(event flow.Event) (*flow.ServiceEvent, err // ExecutionResult has already been fully constructed, but can't be broadcast). // We will only drop service events whose DKGIndexMap is invalid. As the Protocol State will anyway discard // such events, it is fine to not relay them in the first place. - n := len(cdcDKGIndexMap.Pairs) - encounteredIndices := make([]bool, n) // tracks which indices we have already seed, to detect duplicates - commit.DKGIndexMap = make(flow.DKGIndexMap, n) + dKGIndexMap := make(flow.DKGIndexMap, len(cdcDKGIndexMap.Pairs)) for _, pair := range cdcDKGIndexMap.Pairs { nodeID, err := flow.HexStringToIdentifier(string(pair.Key.(cadence.String))) if err != nil { return nil, fmt.Errorf("failed to decode flow.Identifer in DKGIndexMap entry from EpochRecover event: %w", err) } index := pair.Value.(cadence.Int).Int() - commit.DKGIndexMap[nodeID] = index + dKGIndexMap[nodeID] = index + } - // enforce invariant needed for ID computation: DKGIndexMap values form the set {0, 1, ..., n-1} - if index < 0 || index >= n { - return nil, fmt.Errorf("index %d is outside allowed range [0,n-1] for a DKG committee of size n=%d", index, n) - } - if encounteredIndices[index] { - return nil, fmt.Errorf("duplicated DKG index %d", index) - } - encounteredIndices[index] = true + commit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: uint64(counter), + ClusterQCs: clusterQCs, + DKGGroupKey: dKGGroupKey, + DKGParticipantKeys: dKGParticipantKeys, + DKGIndexMap: dKGIndexMap, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch commit: %w", err) } // create the service event @@ -420,12 +417,8 @@ func convertServiceEventEpochCommitV0(event flow.Event) (*flow.ServiceEvent, err return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) } - commit := &flow.EpochCommit{ - Counter: uint64(counter), - } - // parse cluster qc votes - commit.ClusterQCs, err = convertClusterQCVotes(cdcClusterQCVotes.Values) + clusterQCs, err := convertClusterQCVotes(cdcClusterQCVotes.Values) if err != nil { return nil, fmt.Errorf("could not convert cluster qc votes: %w", err) } @@ -433,15 +426,27 @@ func convertServiceEventEpochCommitV0(event flow.Event) (*flow.ServiceEvent, err // parse DKG group key and participants // Note: this is read in the same order as `DKGClient.SubmitResult` ie. 
with the group public key first followed by individual keys // https://github.com/onflow/flow-go/blob/feature/dkg/module/dkg/client.go#L182-L183 - commit.DKGGroupKey, err = convertDKGKey(cdcDKGKeys.Values[0]) + dKGGroupKey, err := convertDKGKey(cdcDKGKeys.Values[0]) if err != nil { return nil, fmt.Errorf("could not convert DKG group key: %w", err) } - commit.DKGParticipantKeys, err = convertDKGKeys(cdcDKGKeys.Values[1:]) + dKGParticipantKeys, err := convertDKGKeys(cdcDKGKeys.Values[1:]) if err != nil { return nil, fmt.Errorf("could not convert DKG keys: %w", err) } - commit.DKGIndexMap = nil + + commit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: uint64(counter), + ClusterQCs: clusterQCs, + DKGGroupKey: dKGGroupKey, + DKGParticipantKeys: dKGParticipantKeys, + DKGIndexMap: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch commit: %w", err) + } // create the service event serviceEvent := &flow.ServiceEvent{ @@ -562,20 +567,9 @@ func convertServiceEventEpochRecover(event flow.Event) (*flow.ServiceEvent, erro return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) } - setup := flow.EpochSetup{ - Counter: uint64(counter), - FirstView: uint64(firstView), - FinalView: uint64(finalView), - DKGPhase1FinalView: uint64(dkgPhase1FinalView), - DKGPhase2FinalView: uint64(dkgPhase2FinalView), - DKGPhase3FinalView: uint64(dkgPhase3FinalView), - TargetDuration: uint64(targetDuration), - TargetEndTime: uint64(targetEndTimeUnix), - } - // random source from the event must be a hex string // containing exactly 128 bits (equivalent to 16 bytes or 32 hex characters) - setup.RandomSource, err = hex.DecodeString(string(randomSrcHex)) + randomSource, err := hex.DecodeString(string(randomSrcHex)) if err != nil { return nil, fmt.Errorf( "failed to decode random source hex (%v) from EpochRecover event: %w", @@ -584,44 +578,51 @@ func convertServiceEventEpochRecover(event flow.Event) (*flow.ServiceEvent, erro ) } - if len(setup.RandomSource) != flow.EpochSetupRandomSourceLength { - return nil, fmt.Errorf( - "random source in EpochRecover event must be of (%d) bytes, got (%d)", - flow.EpochSetupRandomSourceLength, - len(setup.RandomSource), - ) - } - // parse cluster assignments; returned assignments are in canonical order - setup.Assignments, err = convertEpochRecoverCollectorClusterAssignments(cdcClusters.Values) + assignments, err := convertEpochRecoverCollectorClusterAssignments(cdcClusters.Values) if err != nil { return nil, fmt.Errorf("failed to convert cluster assignments from EpochRecover event: %w", err) } // parse epoch participants; returned node identities are in canonical order - setup.Participants, err = convertParticipants(cdcParticipants.Values) + participants, err := convertParticipants(cdcParticipants.Values) if err != nil { return nil, fmt.Errorf("failed to convert participants from EpochRecover event: %w", err) } - commit := flow.EpochCommit{ - Counter: uint64(counter), + setup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: uint64(counter), + FirstView: uint64(firstView), + DKGPhase1FinalView: uint64(dkgPhase1FinalView), + DKGPhase2FinalView: uint64(dkgPhase2FinalView), + DKGPhase3FinalView: uint64(dkgPhase3FinalView), + FinalView: uint64(finalView), + Participants: participants, + Assignments: assignments, + RandomSource: randomSource, + TargetDuration: uint64(targetDuration), + TargetEndTime: uint64(targetEndTimeUnix), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch setup: 
%w", err) } // parse cluster qc votes - commit.ClusterQCs, err = convertClusterQCVoteData(cdcClusterQCVoteData.Values) + clusterQCs, err := convertClusterQCVoteData(cdcClusterQCVoteData.Values) if err != nil { return nil, fmt.Errorf("failed to decode clusterQCVoteData from EpochRecover event: %w", err) } // parse DKG participants - commit.DKGParticipantKeys, err = convertDKGKeys(cdcDKGKeys.Values) + dKGParticipantKeys, err := convertDKGKeys(cdcDKGKeys.Values) if err != nil { return nil, fmt.Errorf("failed to decode Random Beacon key shares from EpochRecover event: %w", err) } // parse DKG group key - commit.DKGGroupKey, err = convertDKGKey(cdcDKGGroupKey) + dKGGroupKey, err := convertDKGKey(cdcDKGGroupKey) if err != nil { return nil, fmt.Errorf("failed to decode Random Beacon group key from EpochRecover event: %w", err) } @@ -648,34 +649,43 @@ func convertServiceEventEpochRecover(event flow.Event) (*flow.ServiceEvent, erro // ExecutionResult has already been fully constructed, but can't be broadcast). // We will only drop service events whose DKGIndexMap is invalid. As the Protocol State will anyway discard // such events, it is fine to not relay them in the first place. - n := len(cdcDKGIndexMap.Pairs) - encounteredIndices := make([]bool, n) // tracks which indices we have already seed, to detect duplicates - commit.DKGIndexMap = make(flow.DKGIndexMap, n) + dKGIndexMap := make(flow.DKGIndexMap, len(cdcDKGIndexMap.Pairs)) for _, pair := range cdcDKGIndexMap.Pairs { nodeID, err := flow.HexStringToIdentifier(string(pair.Key.(cadence.String))) if err != nil { return nil, fmt.Errorf("failed to decode flow.Identifer in DKGIndexMap entry from EpochRecover event: %w", err) } index := pair.Value.(cadence.Int).Int() - commit.DKGIndexMap[nodeID] = index + dKGIndexMap[nodeID] = index + } - // enforce invariant needed for ID computation: DKGIndexMap values form the set {0, 1, ..., n-1} - if index < 0 || index >= n { - return nil, fmt.Errorf("index %d is outside allowed range [0,n-1] for a DKG committee of size n=%d", index, n) - } - if encounteredIndices[index] { - return nil, fmt.Errorf("duplicated DKG index %d", index) - } - encounteredIndices[index] = true + commit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: uint64(counter), + ClusterQCs: clusterQCs, + DKGGroupKey: dKGGroupKey, + DKGParticipantKeys: dKGParticipantKeys, + DKGIndexMap: dKGIndexMap, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch commit: %w", err) } // create the service event - serviceEvent := &flow.ServiceEvent{ - Type: flow.ServiceEventRecover, - Event: &flow.EpochRecover{ - EpochSetup: setup, - EpochCommit: commit, + epochRecover, err := flow.NewEpochRecover( + flow.UntrustedEpochRecover{ + EpochSetup: *setup, + EpochCommit: *commit, }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch recover: %w", err) + } + + serviceEvent := &flow.ServiceEvent{ + Type: flow.ServiceEventRecover, + Event: epochRecover, } return serviceEvent, nil diff --git a/model/encodable/keys.go b/model/encodable/keys.go index ee0e657c31a..0176abfbf9c 100644 --- a/model/encodable/keys.go +++ b/model/encodable/keys.go @@ -26,7 +26,7 @@ func toHex(bs []byte) string { func fromJSONHex(b []byte) ([]byte, error) { var x string if err := json.Unmarshal(b, &x); err != nil { - return nil, fmt.Errorf("could not unmarshal the key: %w", err) + return nil, fmt.Errorf("could not unmarshal the value: %w", err) } return hex.DecodeString(x) } @@ -34,7 +34,7 @@ func fromJSONHex(b []byte) ([]byte, 
error) { func fromMsgPackHex(b []byte) ([]byte, error) { var x string if err := msgpack.Unmarshal(b, &x); err != nil { - return nil, fmt.Errorf("could not unmarshal the key: %w", err) + return nil, fmt.Errorf("could not unmarshal the value: %w", err) } return hex.DecodeString(x) } @@ -42,7 +42,7 @@ func fromMsgPackHex(b []byte) ([]byte, error) { func fromCBORPackHex(b []byte) ([]byte, error) { var x string if err := cbor.Unmarshal(b, &x); err != nil { - return nil, fmt.Errorf("could not unmarshal the key: %w", err) + return nil, fmt.Errorf("could not unmarshal the value: %w", err) } return hex.DecodeString(x) } @@ -303,3 +303,21 @@ func (priv *MachineAccountPrivKey) UnmarshalJSON(b []byte) error { priv.PrivateKey, err = crypto.DecodePrivateKey(crypto.ECDSAP256, bz) return err } + +// StakingKeyPoP wraps a crypto signature and allows it to be JSON encoded and decoded. +type StakingKeyPoP struct { + crypto.Signature +} + +func (pub StakingKeyPoP) MarshalJSON() ([]byte, error) { + if pub.Signature == nil { + return json.Marshal(nil) + } + return json.Marshal(toHex(pub.Signature)) +} + +func (pub *StakingKeyPoP) UnmarshalJSON(b []byte) error { + var err error + pub.Signature, err = fromJSONHex(b) + return err +} diff --git a/model/encodable/keys_test.go b/model/encodable/keys_test.go index ddb1c812461..48f816ad37a 100644 --- a/model/encodable/keys_test.go +++ b/model/encodable/keys_test.go @@ -257,3 +257,21 @@ func generateRandomSeed(t *testing.T) []byte { require.Equal(t, n, 48) return seed } + +func TestEncodableStakingKeyPoP(t *testing.T) { + sig := crypto.Signature(make([]byte, crypto.SignatureLenBLSBLS12381)) + _, err := rand.Read(sig) + require.NoError(t, err) + pop := StakingKeyPoP{sig} + + enc, err := json.Marshal(pop) + require.NoError(t, err) + require.NotEmpty(t, enc) + require.NoError(t, isHexString(enc)) + + var dec StakingKeyPoP + err = json.Unmarshal(enc, &dec) + require.NoError(t, err) + + require.Equal(t, sig, dec.Signature, "encoded/decoded signature equality check failed") +} diff --git a/model/encoding/rlp/rlp_test.go b/model/encoding/rlp/rlp_test.go index 4b07e5d8a71..2982e5d4cf4 100644 --- a/model/encoding/rlp/rlp_test.go +++ b/model/encoding/rlp/rlp_test.go @@ -3,7 +3,7 @@ package rlp_test import ( "testing" - "github.com/onflow/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/model/flow/epoch.go b/model/flow/epoch.go index 03e93b2bf1b..4902c686386 100644 --- a/model/flow/epoch.go +++ b/model/flow/epoch.go @@ -3,6 +3,7 @@ package flow import ( "bytes" "encoding/json" + "fmt" "io" "github.com/ethereum/go-ethereum/rlp" @@ -136,6 +137,8 @@ const EpochSetupRandomSourceLength = 16 // When an EpochSetup event is accepted and incorporated into the Protocol State, this triggers the // Distributed Key Generation [DKG] and cluster QC voting process for the next epoch. // It also causes the current epoch to enter the EpochPhaseSetup phase. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochSetup struct { Counter uint64 // the number of the epoch being setup (current+1) FirstView uint64 // the first view of the epoch being setup @@ -150,6 +153,69 @@ type EpochSetup struct { TargetEndTime uint64 // desired real-world end time for the epoch in UNIX time [seconds] } +// UntrustedEpochSetup is an untrusted input-only representation of an EpochSetup, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochSetup should be validated and converted into +// a trusted EpochSetup using NewEpochSetup constructor. +type UntrustedEpochSetup EpochSetup + +// NewEpochSetup creates a new instance of EpochSetup. +// Construction EpochSetup allowed only within the constructor. +// +// All errors indicate a valid EpochSetup cannot be constructed from the input. +func NewEpochSetup(untrusted UntrustedEpochSetup) (*EpochSetup, error) { + if untrusted.FirstView >= untrusted.FinalView { + return nil, fmt.Errorf("invalid timing - first view (%d) ends after the final view (%d)", untrusted.FirstView, untrusted.FinalView) + } + if untrusted.FirstView >= untrusted.DKGPhase1FinalView { + return nil, fmt.Errorf("invalid timing - first view (%d) ends after dkg phase 1 (%d)", untrusted.FirstView, untrusted.DKGPhase1FinalView) + } + if untrusted.DKGPhase1FinalView >= untrusted.DKGPhase2FinalView { + return nil, fmt.Errorf("invalid dkg timing - phase 1 (%d) ends after phase 2 (%d)", untrusted.DKGPhase1FinalView, untrusted.DKGPhase2FinalView) + } + if untrusted.DKGPhase2FinalView >= untrusted.DKGPhase3FinalView { + return nil, fmt.Errorf("invalid dkg timing - phase 2 (%d) ends after phase 3 (%d)", untrusted.DKGPhase2FinalView, untrusted.DKGPhase3FinalView) + } + if untrusted.DKGPhase3FinalView >= untrusted.FinalView { + return nil, fmt.Errorf("invalid timing - dkg phase 3 (%d) ends after final view (%d)", untrusted.DKGPhase3FinalView, untrusted.FinalView) + } + if untrusted.Participants == nil { + return nil, fmt.Errorf("participants must not be nil") + } + if untrusted.Assignments == nil { + return nil, fmt.Errorf("assignments must not be nil") + } + if len(untrusted.RandomSource) != EpochSetupRandomSourceLength { + return nil, fmt.Errorf( + "random source must be of (%d) bytes, got (%d)", + EpochSetupRandomSourceLength, + len(untrusted.RandomSource), + ) + } + if untrusted.TargetDuration == 0 { + return nil, fmt.Errorf("target duration must be greater than 0") + } + + return &EpochSetup{ + Counter: untrusted.Counter, + FirstView: untrusted.FirstView, + DKGPhase1FinalView: untrusted.DKGPhase1FinalView, + DKGPhase2FinalView: untrusted.DKGPhase2FinalView, + DKGPhase3FinalView: untrusted.DKGPhase3FinalView, + FinalView: untrusted.FinalView, + Participants: untrusted.Participants, + Assignments: untrusted.Assignments, + RandomSource: untrusted.RandomSource, + TargetDuration: untrusted.TargetDuration, + TargetEndTime: untrusted.TargetEndTime, + }, nil +} + func (setup *EpochSetup) ServiceEvent() ServiceEvent { return ServiceEvent{ Type: ServiceEventSetup, @@ -199,11 +265,51 @@ func (setup *EpochSetup) EqualTo(other *EpochSetup) bool { // EpochRecover service event is emitted when network is in Epoch Fallback Mode(EFM) in an attempt to return to happy path. // It contains data from EpochSetup, and EpochCommit events to so replicas can create a committed epoch from which they // can continue operating on the happy path. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochRecover struct { EpochSetup EpochSetup EpochCommit EpochCommit } +// UntrustedEpochRecover is an untrusted input-only representation of an EpochRecover, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochRecover should be validated and converted into +// a trusted EpochRecover using NewEpochRecover constructor. +type UntrustedEpochRecover EpochRecover + +// NewEpochRecover creates a new instance of EpochRecover. +// Construction EpochRecover allowed only within the constructor. +// +// All errors indicate a valid EpochRecover cannot be constructed from the input. +func NewEpochRecover(untrusted UntrustedEpochRecover) (*EpochRecover, error) { + // EpochSetup must be non-empty and is intended to be constructed solely through the constructor. + if untrusted.EpochSetup.EqualTo(new(EpochSetup)) { + return nil, fmt.Errorf("EpochSetup is empty") + } + // EpochCommit must be non-empty and is intended to be constructed solely through the constructor. + if untrusted.EpochCommit.EqualTo(new(EpochCommit)) { + return nil, fmt.Errorf("EpochCommit is empty") + } + + if untrusted.EpochCommit.Counter != untrusted.EpochSetup.Counter { + return nil, fmt.Errorf("inconsistent epoch counter between commit (%d) and setup (%d) events in same epoch", untrusted.EpochCommit.Counter, untrusted.EpochSetup.Counter) + } + if len(untrusted.EpochSetup.Assignments) != len(untrusted.EpochCommit.ClusterQCs) { + return nil, fmt.Errorf("number of clusters (%d) does not match number of QCs (%d)", len(untrusted.EpochSetup.Assignments), len(untrusted.EpochCommit.ClusterQCs)) + } + + return &EpochRecover{ + EpochSetup: untrusted.EpochSetup, + EpochCommit: untrusted.EpochCommit, + }, nil +} + func (er *EpochRecover) ServiceEvent() ServiceEvent { return ServiceEvent{ Type: ServiceEventRecover, @@ -241,6 +347,8 @@ func (er *EpochRecover) EqualTo(other *EpochRecover) bool { // artifacts produced by the DKG are referred to with the "DKG" prefix (for example, DKGGroupKey). // These artifacts are *produced by* the DKG, but used for the Random Beacon. As such, other // components refer to these same artifacts with the "RandomBeacon" prefix. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochCommit struct { // Counter is the epoch counter of the epoch being committed Counter uint64 @@ -267,6 +375,58 @@ type EpochCommit struct { DKGIndexMap DKGIndexMap } +// UntrustedEpochCommit is an untrusted input-only representation of an EpochCommit, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochCommit should be validated and converted into +// a trusted EpochCommit using NewEpochCommit constructor. +type UntrustedEpochCommit EpochCommit + +// NewEpochCommit creates a new instance of EpochCommit. +// Construction EpochCommit allowed only within the constructor. +// +// All errors indicate a valid EpochCommit cannot be constructed from the input.
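+//
+// A minimal usage sketch (illustrative; it assumes the caller has already decoded
+// counter, clusterQCs, dkgGroupKey, dkgParticipantKeys and dkgIndexMap, e.g. from a
+// service event):
+//
+//	commit, err := NewEpochCommit(UntrustedEpochCommit{
+//		Counter:            counter,
+//		ClusterQCs:         clusterQCs,
+//		DKGGroupKey:        dkgGroupKey,
+//		DKGParticipantKeys: dkgParticipantKeys,
+//		DKGIndexMap:        dkgIndexMap,
+//	})
+//	if err != nil {
+//		return nil, fmt.Errorf("could not construct epoch commit: %w", err)
+//	}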
+func NewEpochCommit(untrusted UntrustedEpochCommit) (*EpochCommit, error) { + if untrusted.DKGGroupKey == nil { + return nil, fmt.Errorf("DKG group key must not be nil") + } + if len(untrusted.ClusterQCs) == 0 { + return nil, fmt.Errorf("cluster QCs list must not be empty") + } + // TODO(mainnet27): remove this conditional: https://github.com/onflow/flow-go/issues/6772 + if untrusted.DKGIndexMap != nil { + // enforce invariant: len(DKGParticipantKeys) == len(DKGIndexMap) + n := len(untrusted.DKGIndexMap) // size of the DKG committee + if len(untrusted.DKGParticipantKeys) != n { + return nil, fmt.Errorf("number of %d Random Beacon key shares is inconsistent with number of DKG participants (len=%d)", len(untrusted.DKGParticipantKeys), len(untrusted.DKGIndexMap)) + } + + // enforce invariant: DKGIndexMap values form the set {0, 1, ..., n-1} where n=len(DKGParticipantKeys) + encounteredIndex := make([]bool, n) + for _, index := range untrusted.DKGIndexMap { + if index < 0 || index >= n { + return nil, fmt.Errorf("index %d is outside allowed range [0,n-1] for a DKG committee of size n=%d", index, n) + } + if encounteredIndex[index] { + return nil, fmt.Errorf("duplicated DKG index %d", index) + } + encounteredIndex[index] = true + } + } + + return &EpochCommit{ + Counter: untrusted.Counter, + ClusterQCs: untrusted.ClusterQCs, + DKGGroupKey: untrusted.DKGGroupKey, + DKGParticipantKeys: untrusted.DKGParticipantKeys, + DKGIndexMap: untrusted.DKGIndexMap, + }, nil +} + // ClusterQCVoteData represents the votes for a cluster quorum certificate, as // gathered by the ClusterQC smart contract. It contains the aggregated // signature over the root block for the cluster as well as the set of voters. @@ -337,22 +497,24 @@ func encodableFromCommit(commit *EpochCommit) encodableCommit { } } -func commitFromEncodable(enc encodableCommit) EpochCommit { +func commitFromEncodable(enc encodableCommit) (*EpochCommit, error) { dkgKeys := make([]crypto.PublicKey, 0, len(enc.DKGParticipantKeys)) for _, key := range enc.DKGParticipantKeys { dkgKeys = append(dkgKeys, key.PublicKey) } - return EpochCommit{ - Counter: enc.Counter, - ClusterQCs: enc.ClusterQCs, - DKGGroupKey: enc.DKGGroupKey.PublicKey, - DKGParticipantKeys: dkgKeys, - DKGIndexMap: enc.DKGIndexMap, - } + return NewEpochCommit( + UntrustedEpochCommit{ + Counter: enc.Counter, + ClusterQCs: enc.ClusterQCs, + DKGGroupKey: enc.DKGGroupKey.PublicKey, + DKGParticipantKeys: dkgKeys, + DKGIndexMap: enc.DKGIndexMap, + }, + ) } -func (commit EpochCommit) MarshalJSON() ([]byte, error) { - return json.Marshal(encodableFromCommit(&commit)) +func (commit *EpochCommit) MarshalJSON() ([]byte, error) { + return json.Marshal(encodableFromCommit(commit)) } func (commit *EpochCommit) UnmarshalJSON(b []byte) error { @@ -362,7 +524,12 @@ func (commit *EpochCommit) UnmarshalJSON(b []byte) error { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } @@ -377,7 +544,12 @@ func (commit *EpochCommit) UnmarshalCBOR(b []byte) error { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } @@ -391,7 +563,12 @@ func (commit *EpochCommit) UnmarshalMsgpack(b []byte) error { if err != nil { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } diff --git 
a/model/flow/epoch_test.go b/model/flow/epoch_test.go index 6e35e6939be..667e641f716 100644 --- a/model/flow/epoch_test.go +++ b/model/flow/epoch_test.go @@ -2,6 +2,7 @@ package flow_test import ( "testing" + "time" "github.com/onflow/crypto" "github.com/stretchr/testify/require" @@ -11,7 +12,6 @@ import ( ) func TestClusterQCVoteData_Equality(t *testing.T) { - pks := unittest.PublicKeysFixture(2, crypto.BLSBLS12381) _ = len(pks) @@ -77,7 +77,6 @@ func TestClusterQCVoteData_Equality(t *testing.T) { } func TestEpochCommit_EqualTo(t *testing.T) { - qcA := flow.ClusterQCVoteData{ SigData: []byte{3, 3, 3}, VoterIDs: []flow.Identifier{flow.HashToID([]byte{1, 2, 3}), flow.HashToID([]byte{3, 2, 1})}, @@ -214,7 +213,6 @@ func TestEpochCommit_EqualTo(t *testing.T) { } func TestEpochSetup_EqualTo(t *testing.T) { - identityA := &unittest.IdentityFixture().IdentitySkeleton identityB := &unittest.IdentityFixture().IdentitySkeleton @@ -340,3 +338,469 @@ func TestEpochSetup_EqualTo(t *testing.T) { require.False(t, b.EqualTo(a)) }) } + +// TestNewEpochSetup verifies the behavior of the NewEpochSetup constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns setup: +// - Ensures that providing all required and correctly formatted fields results in a successful creation of an EpochSetup instance. +// +// 2. Invalid FirstView and FinalView: +// - Verifies that an error is returned when FirstView is not less than FinalView, as this violates the expected chronological order. +// +// 3. Invalid FirstView >= DKGPhase1FinalView: +// - Ensures that FirstView must end before DKG Phase 1 ends. +// +// 4. Invalid DKGPhase1FinalView >= DKGPhase2FinalView: +// - Ensures DKG Phase 1 must end before DKG Phase 2 ends. +// +// 5. Invalid DKGPhase2FinalView >= DKGPhase3FinalView: +// - Ensures DKG Phase 2 must end before DKG Phase 3 ends. +// +// 6. Invalid DKGPhase3FinalView >= FinalView: +// - Ensures DKG Phase 3 must end before FinalView. +// +// 7. Invalid participants: +// - Checks that an error is returned when the Participants field is nil. +// +// 8. Invalid assignments: +// - Ensures that an error is returned when the Assignments field is nil. +// +// 9. Invalid RandomSource: +// - Validates that an error is returned when the RandomSource does not meet the required length. +// +// 10. Invalid TargetDuration: +// - Confirms that an error is returned when TargetDuration is zero. 
+func TestNewEpochSetup(t *testing.T) { + participants := unittest.IdentityListFixture(5, unittest.WithAllRoles()) + validParticipants := participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton() + validRandomSource := unittest.SeedFixture(flow.EpochSetupRandomSourceLength) + validAssignments := unittest.ClusterAssignment(1, validParticipants) + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + Counter: 1, + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60 * 60, + TargetEndTime: uint64(time.Now().Unix()) + 1000, + } + + setup, err := flow.NewEpochSetup(untrusted) + require.NoError(t, err) + require.NotNil(t, setup) + }) + + t.Run("invalid FirstView and FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 100, + FinalView: 90, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid timing - first view (100) ends after the final view (90)") + }) + + t.Run("invalid FirstView >= DKGPhase1FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 20, + DKGPhase1FinalView: 10, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid timing - first view (20) ends after dkg phase 1 (10)") + }) + + t.Run("invalid DKGPhase1FinalView >= DKGPhase2FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 30, + DKGPhase2FinalView: 20, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid dkg timing - phase 1 (30) ends after phase 2 (20)") + }) + + t.Run("invalid DKGPhase2FinalView >= DKGPhase3FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 40, + DKGPhase3FinalView: 30, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid dkg timing - phase 2 (40) ends after phase 3 (30)") + }) + + t.Run("invalid DKGPhase3FinalView >= FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 60, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid 
timing - dkg phase 3 (60) ends after final view (50)") + }) + + t.Run("invalid participants", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: nil, + Assignments: validAssignments, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "participants must not be nil") + }) + + t.Run("invalid assignments", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: nil, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "assignments must not be nil") + }) + + t.Run("invalid RandomSource", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: make([]byte, flow.EpochSetupRandomSourceLength-1), // too short + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "random source must be of") + }) + t.Run("invalid TargetDuration", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 0, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "target duration must be greater than 0") + }) +} + +// TestNewEpochCommit validates the behavior of the NewEpochCommit constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns commit: +// - Ensures that providing all required and correctly formatted fields results in a successful creation of an EpochCommit instance. +// +// 2. Nil DKGGroupKey: +// - Verifies that an error is returned when DKGGroupKey is nil. + +// 3. Empty cluster QCs list: +// - Verifies that an error is returned when cluster QCs list is empty. +// +// 4. Mismatched DKGParticipantKeys and DKGIndexMap lengths: +// - Checks that an error is returned when the number of DKGParticipantKeys does not match the length of DKGIndexMap. +// +// 5. DKGIndexMap with out-of-range index: +// - Ensures that an error is returned when DKGIndexMap contains an index outside the valid range. +// +// 6. DKGIndexMap with duplicate indices: +// - Validates that an error is returned when DKGIndexMap contains duplicate indices. 
+func TestNewEpochCommit(t *testing.T) { + // Setup common valid data + validParticipantKeys := unittest.PublicKeysFixture(2, crypto.BLSBLS12381) + validDKGGroupKey := unittest.KeyFixture(crypto.BLSBLS12381).PublicKey() + validIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 1, + } + validClusterQCs := []flow.ClusterQCVoteData{ + { + VoterIDs: []flow.Identifier{ + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + }, + SigData: []byte{1, 1, 1}, + }, + { + VoterIDs: []flow.Identifier{ + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + }, + SigData: []byte{2, 2, 2}, + }, + } + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.NoError(t, err) + require.NotNil(t, commit) + }) + + t.Run("nil DKGGroupKey", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: nil, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "DKG group key must not be nil") + }) + + t.Run("empty list of cluster QCs", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: []flow.ClusterQCVoteData{}, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "cluster QCs list must not be empty") + }) + + t.Run("mismatched DKGParticipantKeys and DKGIndexMap lengths", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: unittest.PublicKeysFixture(1, crypto.BLSBLS12381), // Only one key + DKGIndexMap: validIndexMap, // Two entries + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "number of 1 Random Beacon key shares is inconsistent with number of DKG participants (len=2)") + }) + + t.Run("DKGIndexMap with out-of-range index", func(t *testing.T) { + invalidIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 2, // Index out of range for 2 participants + } + + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: invalidIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "index 2 is outside allowed range [0,n-1] for a DKG committee of size n=2") + }) + + t.Run("DKGIndexMap with duplicate indices", func(t *testing.T) { + duplicateIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 0, // Duplicate index + } + + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: duplicateIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + 
require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "duplicated DKG index 0") + }) +} + +// TestNewEpochRecover validates the behavior of the NewEpochRecover constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns recover: +// - Ensures that providing non-empty EpochSetup and EpochCommit results in a successful creation of an EpochRecover instance. +// +// 2. Empty EpochSetup: +// - Verifies that an error is returned when EpochSetup is empty. +// +// 3. Empty EpochCommit: +// - Checks that an error is returned when EpochCommit is empty. +// +// 4. Mismatched cluster counts: +// - Validates that an error is returned when the number of Assignments in EpochSetup does not match the number of ClusterQCs in EpochCommit. +// +// 5. Mismatched epoch counters: +// - Ensures that an error is returned when the Counter values in EpochSetup and EpochCommit do not match. + +func TestNewEpochRecover(t *testing.T) { + // Setup common valid data + setupParticipants := unittest.IdentityListFixture(5, unittest.WithAllRoles()).Sort(flow.Canonical[flow.Identity]) + + validSetup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(1), + unittest.WithParticipants(setupParticipants.ToSkeleton()), + ) + validCommit := unittest.EpochCommitFixture( + unittest.CommitWithCounter(1), + unittest.WithDKGFromParticipants(validSetup.Participants), + ) + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.NoError(t, err) + require.NotNil(t, recoverEpoch) + }) + + t.Run("empty EpochSetup", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *new(flow.EpochSetup), // Empty setup + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "EpochSetup is empty") + }) + + t.Run("empty EpochCommit", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: *new(flow.EpochCommit), // Empty commit + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "EpochCommit is empty") + }) + + t.Run("mismatched cluster counts", func(t *testing.T) { + // Create a copy of validSetup with an extra assignment + mismatchedSetup := *validSetup + mismatchedSetup.Assignments = unittest.ClusterAssignment(2, setupParticipants.ToSkeleton()) + + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: mismatchedSetup, + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "does not match number of QCs") + }) + + t.Run("mismatched epoch counters", func(t *testing.T) { + // Create a copy of validCommit with a different counter + mismatchedCommit := *validCommit + mismatchedCommit.Counter = validSetup.Counter + 1 + + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: mismatchedCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "inconsistent epoch counter") + }) +} diff --git a/model/flow/header.go b/model/flow/header.go index 
a4ee2efbc78..e5ca226af12 100644 --- a/model/flow/header.go +++ b/model/flow/header.go @@ -2,6 +2,7 @@ package flow import ( "encoding/json" + "fmt" "time" "github.com/fxamacker/cbor/v2" @@ -76,14 +77,30 @@ func (h Header) Body() interface{} { } } -// QuorumCertificate returns quorum certificate that is incorporated in the block header. -func (h Header) QuorumCertificate() *QuorumCertificate { - return &QuorumCertificate{ +// ParentQC returns quorum certificate that is incorporated in the block header. +// Callers *must* first verify that a parent QC is present (e.g. via ContainsParentQC) +// before calling ParentQC. If no valid parent QC data exists (such as on a spork‐root +// header), ParentQC will panic. +func (h Header) ParentQC() *QuorumCertificate { + qc, err := NewQuorumCertificate(UntrustedQuorumCertificate{ BlockID: h.ParentID, View: h.ParentView, SignerIndices: h.ParentVoterIndices, SigData: h.ParentVoterSigData, + }) + if err != nil { + panic(fmt.Errorf("could not build parent quorum certificate: %w", err)) } + + return qc +} + +// ContainsParentQC reports whether this header carries a valid parent QC. +// It returns true only if all of the fields required to build a QC are non-zero/nil, +// indicating that ParentQC() can be safely called without panicking. +// Only spork root blocks or network genesis blocks do not contain a parent QC. +func (h Header) ContainsParentQC() bool { + return h.ParentID != ZeroID && h.ParentVoterIndices != nil && h.ParentVoterSigData != nil && h.ProposerID != ZeroID } func (h Header) Fingerprint() []byte { diff --git a/model/flow/incorporated_result.go b/model/flow/incorporated_result.go index 7d9c29611b8..de5eb66f0c7 100644 --- a/model/flow/incorporated_result.go +++ b/model/flow/incorporated_result.go @@ -1,7 +1,11 @@ package flow +import "fmt" + // IncorporatedResult is a wrapper around an ExecutionResult which contains the // ID of the first block on its fork in which it was incorporated. +// +//structwrite:immutable - mutations allowed only within the constructor type IncorporatedResult struct { // IncorporatedBlockID is the ID of the first block on its fork where a // receipt for this result was incorporated. Within a fork, multiple blocks @@ -14,11 +18,34 @@ type IncorporatedResult struct { Result *ExecutionResult } -func NewIncorporatedResult(incorporatedBlockID Identifier, result *ExecutionResult) *IncorporatedResult { - return &IncorporatedResult{ - IncorporatedBlockID: incorporatedBlockID, - Result: result, +// UntrustedIncorporatedResult is an untrusted input-only representation of an IncorporatedResult, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedIncorporatedResult should be validated and converted into +// a trusted IncorporatedResult using NewIncorporatedResult constructor. +type UntrustedIncorporatedResult IncorporatedResult + +// NewIncorporatedResult creates a new instance of IncorporatedResult. +// Construction IncorporatedResult allowed only within the constructor +// +// All errors indicate a valid IncorporatedResult cannot be constructed from the input. 
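+//
+// A minimal usage sketch (illustrative; blockID and result are assumed to be a
+// non-zero block identifier and a non-nil *ExecutionResult held by the caller):
+//
+//	ir, err := NewIncorporatedResult(UntrustedIncorporatedResult{
+//		IncorporatedBlockID: blockID,
+//		Result:              result,
+//	})
+//	if err != nil {
+//		return fmt.Errorf("could not construct incorporated result: %w", err)
+//	}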
+func NewIncorporatedResult(untrusted UntrustedIncorporatedResult) (*IncorporatedResult, error) { + if untrusted.IncorporatedBlockID == ZeroID { + return nil, fmt.Errorf("IncorporatedBlockID must not be empty") + } + + if untrusted.Result == nil { + return nil, fmt.Errorf("Result must not be empty") } + + return &IncorporatedResult{ + IncorporatedBlockID: untrusted.IncorporatedBlockID, + Result: untrusted.Result, + }, nil } // ID implements flow.Entity.ID for IncorporatedResult to make it capable of diff --git a/model/flow/incorporated_result_test.go b/model/flow/incorporated_result_test.go index 0f01f5913e7..dccc1cb7764 100644 --- a/model/flow/incorporated_result_test.go +++ b/model/flow/incorporated_result_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -13,10 +14,23 @@ import ( // * grouping should preserve order and multiplicity of elements // * group for unknown identifier should be empty func TestIncorporatedResultGroupBy(t *testing.T) { + ir1, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: unittest.IdentifierFixture(), + Result: unittest.ExecutionResultFixture(), + }) + require.NoError(t, err) - ir1 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture()) - ir2 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture()) - ir3 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture()) + ir2, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: unittest.IdentifierFixture(), + Result: unittest.ExecutionResultFixture(), + }) + require.NoError(t, err) + + ir3, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: unittest.IdentifierFixture(), + Result: unittest.ExecutionResultFixture(), + }) + require.NoError(t, err) idA := unittest.IdentifierFixture() idB := unittest.IdentifierFixture() @@ -41,3 +55,54 @@ func TestIncorporatedResultGroupBy(t *testing.T) { unknown := groups.GetGroup(unittest.IdentifierFixture()) assert.Equal(t, 0, unknown.Size()) } + +// TestNewIncorporatedResult verifies that NewIncorporatedResult constructs a valid +// IncorporatedResult when given complete, non-zero fields, and returns an error +// if any required field is missing. 
+// It covers: +// - valid incorporated result creation +// - missing IncorporatedBlockID +// - nil Result +func TestNewIncorporatedResult(t *testing.T) { + t.Run("valid untrusted incorporated result", func(t *testing.T) { + id := unittest.IdentifierFixture() + // Use a real ExecutionResult fixture and take its address + er := unittest.ExecutionResultFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: id, + Result: er, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.NoError(t, err) + assert.NotNil(t, ir) + assert.Equal(t, id, ir.IncorporatedBlockID) + assert.Equal(t, er, ir.Result) + }) + + t.Run("missing IncorporatedBlockID", func(t *testing.T) { + er := unittest.ExecutionResultFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: flow.ZeroID, + Result: er, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.Error(t, err) + assert.Nil(t, ir) + assert.Contains(t, err.Error(), "IncorporatedBlockID") + }) + + t.Run("nil Result", func(t *testing.T) { + id := unittest.IdentifierFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: id, + Result: nil, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.Error(t, err) + assert.Nil(t, ir) + assert.Contains(t, err.Error(), "Result") + }) +} diff --git a/model/flow/ledger.go b/model/flow/ledger.go index e68a8863017..d18935c37d7 100644 --- a/model/flow/ledger.go +++ b/model/flow/ledger.go @@ -253,6 +253,9 @@ type StorageProof = []byte // TODO: solve the circular dependency and define StateCommitment as ledger.State type StateCommitment hash.Hash +// EmptyStateCommitment is the zero-value state commitment. +var EmptyStateCommitment = StateCommitment{} + // DummyStateCommitment is an arbitrary value used in function failure cases, // although it can represent a valid state commitment. var DummyStateCommitment = StateCommitment(hash.DummyHash) diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go index d884d139e88..118313e3468 100644 --- a/model/flow/protocol_state.go +++ b/model/flow/protocol_state.go @@ -12,6 +12,20 @@ type DynamicIdentityEntry struct { Ejected bool } +// EqualTo returns true if the two DynamicIdentityEntry are equivalent. +func (d *DynamicIdentityEntry) EqualTo(other *DynamicIdentityEntry) bool { + // Shortcut if `t` and `other` point to the same object; covers case where both are nil. + if d == other { + return true + } + if d == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + + return d.NodeID == other.NodeID && + d.Ejected == other.Ejected +} + type DynamicIdentityEntryList []*DynamicIdentityEntry // MinEpochStateEntry is the most compact snapshot of the epoch state and identity table (set of all notes authorized to @@ -21,6 +35,8 @@ type DynamicIdentityEntryList []*DynamicIdentityEntry // table that is constant throughout an epoch, are only referenced by their hash commitment. // Note that a MinEpochStateEntry does not hold the entire data for the identity table directly. It // allows reconstructing the identity table with the referenced epoch setup events and dynamic identities. 
+// +//structwrite:immutable - mutations allowed only within the constructor type MinEpochStateEntry struct { PreviousEpoch *EpochStateContainer // minimal dynamic properties for previous epoch [optional, nil for first epoch after spork, genesis] CurrentEpoch EpochStateContainer // minimal dynamic properties for current epoch @@ -34,9 +50,38 @@ type MinEpochStateEntry struct { EpochFallbackTriggered bool } +// UntrustedMinEpochStateEntry is an untrusted input-only representation of a MinEpochStateEntry, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedMinEpochStateEntry should be validated and converted into +// a trusted MinEpochStateEntry using NewMinEpochStateEntry constructor. +type UntrustedMinEpochStateEntry MinEpochStateEntry + +// NewMinEpochStateEntry creates a new instance of MinEpochStateEntry. +// Construction MinEpochStateEntry allowed only within the constructor. +// +// All errors indicate a valid MinEpochStateEntry cannot be constructed from the input. +func NewMinEpochStateEntry(untrusted UntrustedMinEpochStateEntry) (*MinEpochStateEntry, error) { + if untrusted.CurrentEpoch.EqualTo(new(EpochStateContainer)) { + return nil, fmt.Errorf("current epoch must not be empty") + } + return &MinEpochStateEntry{ + PreviousEpoch: untrusted.PreviousEpoch, + CurrentEpoch: untrusted.CurrentEpoch, + NextEpoch: untrusted.NextEpoch, + EpochFallbackTriggered: untrusted.EpochFallbackTriggered, + }, nil +} + // EpochStateContainer holds the data pertaining to a _single_ epoch but no information about // any adjacent epochs. To perform a transition from epoch N to N+1, EpochStateContainers for // both epochs are necessary. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochStateContainer struct { // ID of setup event for this epoch, never nil. SetupID Identifier @@ -65,12 +110,60 @@ type EpochStateContainer struct { EpochExtensions []EpochExtension } +// UntrustedEpochStateContainer is an untrusted input-only representation of a EpochStateContainer, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochStateContainer should be validated and converted into +// a trusted EpochStateContainer using NewEpochStateContainer constructor. +type UntrustedEpochStateContainer EpochStateContainer + +// NewEpochStateContainer creates a new instance of EpochStateContainer. +// Construction EpochStateContainer allowed only within the constructor. +// +// All errors indicate a valid EpochStateContainer cannot be constructed from the input. 
+func NewEpochStateContainer(untrusted UntrustedEpochStateContainer) (*EpochStateContainer, error) { + if untrusted.SetupID == ZeroID { + return nil, fmt.Errorf("SetupID must not be zero") + } + if untrusted.ActiveIdentities == nil { + return nil, fmt.Errorf("ActiveIdentities must not be nil") + } + if !untrusted.ActiveIdentities.Sorted(IdentifierCanonical) { + return nil, fmt.Errorf("ActiveIdentities are not sorted") + } + + return &EpochStateContainer{ + SetupID: untrusted.SetupID, + CommitID: untrusted.CommitID, + ActiveIdentities: untrusted.ActiveIdentities, + EpochExtensions: untrusted.EpochExtensions, + }, nil +} + // EpochExtension represents a range of views, which contiguously extends this epoch. type EpochExtension struct { FirstView uint64 FinalView uint64 } +// EqualTo returns true if the two EpochExtension are equivalent. +func (e *EpochExtension) EqualTo(other *EpochExtension) bool { + // Shortcut if `t` and `other` point to the same object; covers case where both are nil. + if e == other { + return true + } + if e == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + + return e.FirstView == other.FirstView && + e.FinalView == other.FinalView +} + // ID returns an identifier for this EpochStateContainer by hashing internal fields. // Per convention, the ID of a `nil` EpochStateContainer is `flow.ZeroID`. func (c *EpochStateContainer) ID() Identifier { @@ -101,6 +194,9 @@ func (c *EpochStateContainer) Copy() *EpochStateContainer { ext = make([]EpochExtension, len(c.EpochExtensions)) copy(ext, c.EpochExtensions) } + + // Constructor is skipped since we're copying an already-valid object. + //nolint:structwrite return &EpochStateContainer{ SetupID: c.SetupID, CommitID: c.CommitID, @@ -109,6 +205,37 @@ func (c *EpochStateContainer) Copy() *EpochStateContainer { } } +// EqualTo returns true if the two EpochStateContainer are equivalent. +func (c *EpochStateContainer) EqualTo(other *EpochStateContainer) bool { + // Shortcut if `t` and `other` point to the same object; covers case where both are nil. + if c == other { + return true + } + if c == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + // both are not nil, so we can compare the fields + if c.SetupID != other.SetupID { + return false + } + if c.CommitID != other.CommitID { + return false + } + if !slices.EqualFunc(c.ActiveIdentities, other.ActiveIdentities, func(e1 *DynamicIdentityEntry, e2 *DynamicIdentityEntry) bool { + return e1.EqualTo(e2) + }) { + return false + } + + if !slices.EqualFunc(c.EpochExtensions, other.EpochExtensions, func(e1 EpochExtension, e2 EpochExtension) bool { + return e1.EqualTo(&e2) + }) { + return false + } + + return true +} + // EpochStateEntry is a MinEpochStateEntry that has additional fields that are cached from the // storage layer for convenience. It holds all the information needed to construct a snapshot of // the identity table (set of all notes authorized to be part of the network) at some specific @@ -119,6 +246,8 @@ func (c *EpochStateContainer) Copy() *EpochStateContainer { // - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. // - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Can be nil. // - NextEpochSetup and NextEpochCommit are for the same epoch. Can be nil. +// +//structwrite:immutable - mutations allowed only within the constructor. 
type EpochStateEntry struct { *MinEpochStateEntry @@ -131,50 +260,44 @@ type EpochStateEntry struct { NextEpochCommit *EpochCommit } -// NewEpochStateEntry constructs a EpochStateEntry from an MinEpochStateEntry and additional data. -// No errors are expected during normal operation. All errors indicate inconsistent or invalid inputs. -func NewEpochStateEntry( - epochState *MinEpochStateEntry, - previousEpochSetup *EpochSetup, - previousEpochCommit *EpochCommit, - currentEpochSetup *EpochSetup, - currentEpochCommit *EpochCommit, - nextEpochSetup *EpochSetup, - nextEpochCommit *EpochCommit, -) (*EpochStateEntry, error) { - result := &EpochStateEntry{ - MinEpochStateEntry: epochState, - PreviousEpochSetup: previousEpochSetup, - PreviousEpochCommit: previousEpochCommit, - CurrentEpochSetup: currentEpochSetup, - CurrentEpochCommit: currentEpochCommit, - NextEpochSetup: nextEpochSetup, - NextEpochCommit: nextEpochCommit, - } +// UntrustedEpochStateEntry is an untrusted input-only representation of an EpochStateEntry, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochStateEntry should be validated and converted into +// a trusted EpochStateEntry using NewEpochStateEntry constructor. +type UntrustedEpochStateEntry EpochStateEntry +// NewEpochStateEntry constructs an EpochStateEntry from a MinEpochStateEntry and additional data. +// +// All errors indicate a valid EpochStateEntry cannot be constructed from the input. +func NewEpochStateEntry(untrusted UntrustedEpochStateEntry) (*EpochStateEntry, error) { // If previous epoch is specified: ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.PreviousEpoch` - if epochState.PreviousEpoch != nil { - if epochState.PreviousEpoch.SetupID != previousEpochSetup.ID() { // calling ID() will panic is EpochSetup event is nil - return nil, fmt.Errorf("supplied previous epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", previousEpochSetup.ID(), epochState.PreviousEpoch.SetupID) + if untrusted.PreviousEpoch != nil { + if untrusted.PreviousEpoch.SetupID != untrusted.PreviousEpochSetup.ID() { // calling ID() will panic is EpochSetup event is nil + return nil, fmt.Errorf("supplied previous epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.PreviousEpochSetup.ID(), untrusted.PreviousEpoch.SetupID) } - if epochState.PreviousEpoch.CommitID != previousEpochCommit.ID() { // calling ID() will panic is EpochCommit event is nil - return nil, fmt.Errorf("supplied previous epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", previousEpochCommit.ID(), epochState.PreviousEpoch.CommitID) + if untrusted.PreviousEpoch.CommitID != untrusted.PreviousEpochCommit.ID() { // calling ID() will panic is EpochCommit event is nil + return nil, fmt.Errorf("supplied previous epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.PreviousEpochCommit.ID(), untrusted.PreviousEpoch.CommitID) } } else { - if previousEpochSetup != nil { + if untrusted.PreviousEpochSetup != nil { return nil, fmt.Errorf("no previous epoch but gotten non-nil EpochSetup event") } - if previousEpochCommit != nil { + if untrusted.PreviousEpochCommit != nil { return nil, fmt.Errorf("no previous epoch but gotten non-nil 
EpochCommit event") } } // For current epoch: ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.CurrentEpoch` - if epochState.CurrentEpoch.SetupID != currentEpochSetup.ID() { // calling ID() will panic is EpochSetup event is nil - return nil, fmt.Errorf("supplied current epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", currentEpochSetup.ID(), epochState.CurrentEpoch.SetupID) + if untrusted.CurrentEpoch.SetupID != untrusted.CurrentEpochSetup.ID() { // calling ID() will panic is EpochSetup event is nil + return nil, fmt.Errorf("supplied current epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.CurrentEpochSetup.ID(), untrusted.CurrentEpoch.SetupID) } - if epochState.CurrentEpoch.CommitID != currentEpochCommit.ID() { // calling ID() will panic is EpochCommit event is nil - return nil, fmt.Errorf("supplied current epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", currentEpochCommit.ID(), epochState.CurrentEpoch.CommitID) + if untrusted.CurrentEpoch.CommitID != untrusted.CurrentEpochCommit.ID() { // calling ID() will panic is EpochCommit event is nil + return nil, fmt.Errorf("supplied current epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.CurrentEpochCommit.ID(), untrusted.CurrentEpoch.CommitID) } // If we are in staking phase (i.e. epochState.NextEpoch == nil): @@ -184,30 +307,38 @@ func NewEpochStateEntry( // (2a) Full identity table contains active identities from current epoch + nodes joining in next epoch with `EpochParticipationStatusJoining` status. // (2b) Furthermore, we also build the full identity table for the next epoch's staking phase: // active identities from next epoch + nodes from current epoch that are leaving at the end of the current epoch with `flow.EpochParticipationStatusLeaving` status. - nextEpoch := epochState.NextEpoch + nextEpoch := untrusted.NextEpoch if nextEpoch == nil { // in staking phase: build full identity table for current epoch according to (1) - if nextEpochSetup != nil { + if untrusted.NextEpochSetup != nil { return nil, fmt.Errorf("no next epoch but gotten non-nil EpochSetup event") } - if nextEpochCommit != nil { + if untrusted.NextEpochCommit != nil { return nil, fmt.Errorf("no next epoch but gotten non-nil EpochCommit event") } } else { // epochState.NextEpoch ≠ nil, i.e. 
we are in epoch setup or epoch commit phase // ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.NextEpoch` - if nextEpoch.SetupID != nextEpochSetup.ID() { - return nil, fmt.Errorf("supplied next epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.SetupID, nextEpochSetup.ID()) + if nextEpoch.SetupID != untrusted.NextEpochSetup.ID() { + return nil, fmt.Errorf("supplied next epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.SetupID, untrusted.NextEpochSetup.ID()) } if nextEpoch.CommitID != ZeroID { - if nextEpoch.CommitID != nextEpochCommit.ID() { - return nil, fmt.Errorf("supplied next epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.CommitID, nextEpochCommit.ID()) + if nextEpoch.CommitID != untrusted.NextEpochCommit.ID() { + return nil, fmt.Errorf("supplied next epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.CommitID, untrusted.NextEpochCommit.ID()) } } else { - if nextEpochCommit != nil { + if untrusted.NextEpochCommit != nil { return nil, fmt.Errorf("next epoch not yet committed but got EpochCommit event") } } } - return result, nil + return &EpochStateEntry{ + MinEpochStateEntry: untrusted.MinEpochStateEntry, + PreviousEpochSetup: untrusted.PreviousEpochSetup, + PreviousEpochCommit: untrusted.PreviousEpochCommit, + CurrentEpochSetup: untrusted.CurrentEpochSetup, + CurrentEpochCommit: untrusted.CurrentEpochCommit, + NextEpochSetup: untrusted.NextEpochSetup, + NextEpochCommit: untrusted.NextEpochCommit, + }, nil } // RichEpochStateEntry is a EpochStateEntry that additionally holds the canonical representation of the @@ -227,6 +358,8 @@ func NewEpochStateEntry( // the Identity Table additionally contains nodes (with weight zero) from the previous or // upcoming epoch, which are transitioning into / out of the network and are only allowed // to listen but not to actively contribute. +// +//structwrite:immutable - mutations allowed only within the constructor type RichEpochStateEntry struct { *EpochStateEntry @@ -235,15 +368,15 @@ type RichEpochStateEntry struct { } // NewRichEpochStateEntry constructs a RichEpochStateEntry from an EpochStateEntry. -// No errors are expected during normal operation. All errors indicate inconsistent or invalid inputs. -func NewRichEpochStateEntry( - epochState *EpochStateEntry, -) (*RichEpochStateEntry, error) { - result := &RichEpochStateEntry{ - EpochStateEntry: epochState, - CurrentEpochIdentityTable: IdentityList{}, - NextEpochIdentityTable: IdentityList{}, +// Construction RichEpochStateEntry allowed only within the constructor. +// +// All errors indicate a valid RichEpochStateEntry cannot be constructed from the input. +func NewRichEpochStateEntry(epochState *EpochStateEntry) (*RichEpochStateEntry, error) { + if epochState == nil { + return nil, fmt.Errorf("epoch state must not be nil") } + var currentEpochIdentityTable IdentityList + nextEpochIdentityTable := IdentityList{} // If we are in staking phase (i.e. epochState.NextEpoch == nil): // (1) Full identity table contains active identities from current epoch. // If previous epoch exists, we add nodes from previous epoch that are leaving in the current epoch with status `EpochParticipationStatusLeaving`. 
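
For orientation, the call-site shape that the refactored `NewEpochStateEntry`/`NewRichEpochStateEntry` constructors expect looks roughly like the sketch below. This is a hedged illustration, not part of the patch: the `example` package, the `epochEvents` helper struct, and `buildRichEntry` are hypothetical names; only `flow.UntrustedEpochStateEntry`, `flow.NewEpochStateEntry`, and `flow.NewRichEpochStateEntry` come from the diff above.

```go
package example // hypothetical illustration package, not part of the patch

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// epochEvents bundles the service events referenced by a MinEpochStateEntry.
// The struct is purely illustrative.
type epochEvents struct {
	PreviousSetup, CurrentSetup, NextSetup    *flow.EpochSetup
	PreviousCommit, CurrentCommit, NextCommit *flow.EpochCommit
}

// buildRichEntry shows the two-step construction: first validate the service
// events against the MinEpochStateEntry, then build the identity tables.
func buildRichEntry(min *flow.MinEpochStateEntry, events epochEvents) (*flow.RichEpochStateEntry, error) {
	entry, err := flow.NewEpochStateEntry(flow.UntrustedEpochStateEntry{
		MinEpochStateEntry:  min,
		PreviousEpochSetup:  events.PreviousSetup,  // nil right after a spork
		PreviousEpochCommit: events.PreviousCommit, // must be nil whenever PreviousEpoch is nil
		CurrentEpochSetup:   events.CurrentSetup,
		CurrentEpochCommit:  events.CurrentCommit,
		NextEpochSetup:      events.NextSetup,  // nil during the staking phase
		NextEpochCommit:     events.NextCommit, // nil until the next epoch is committed
	})
	if err != nil {
		return nil, fmt.Errorf("could not construct epoch state entry: %w", err)
	}
	return flow.NewRichEpochStateEntry(entry)
}
```

All errors from both constructors indicate that a consistent entry cannot be built from the supplied inputs; the caller classifies them as benign or exceptional in its own context.
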
@@ -260,7 +393,7 @@ func NewRichEpochStateEntry( previousEpochIdentitySkeletons = previousEpochSetup.Participants previousEpochDynamicIdentities = epochState.PreviousEpoch.ActiveIdentities } - result.CurrentEpochIdentityTable, err = BuildIdentityTable( + currentEpochIdentityTable, err = BuildIdentityTable( epochState.CurrentEpochSetup.Participants, epochState.CurrentEpoch.ActiveIdentities, previousEpochIdentitySkeletons, @@ -271,7 +404,7 @@ func NewRichEpochStateEntry( return nil, fmt.Errorf("could not build identity table for staking phase: %w", err) } } else { // epochState.NextEpoch ≠ nil, i.e. we are in epoch setup or epoch commit phase - result.CurrentEpochIdentityTable, err = BuildIdentityTable( + currentEpochIdentityTable, err = BuildIdentityTable( epochState.CurrentEpochSetup.Participants, epochState.CurrentEpoch.ActiveIdentities, epochState.NextEpochSetup.Participants, @@ -282,7 +415,7 @@ func NewRichEpochStateEntry( return nil, fmt.Errorf("could not build identity table for setup/commit phase: %w", err) } - result.NextEpochIdentityTable, err = BuildIdentityTable( + nextEpochIdentityTable, err = BuildIdentityTable( epochState.NextEpochSetup.Participants, nextEpoch.ActiveIdentities, epochState.CurrentEpochSetup.Participants, @@ -293,7 +426,12 @@ func NewRichEpochStateEntry( return nil, fmt.Errorf("could not build next epoch identity table: %w", err) } } - return result, nil + + return &RichEpochStateEntry{ + EpochStateEntry: epochState, + CurrentEpochIdentityTable: currentEpochIdentityTable, + NextEpochIdentityTable: nextEpochIdentityTable, + }, nil } // ID returns hash of entry by hashing all fields. @@ -321,6 +459,8 @@ func (e *MinEpochStateEntry) Copy() *MinEpochStateEntry { if e == nil { return nil } + // Constructor is skipped since we're copying an already-valid object. + //nolint:structwrite return &MinEpochStateEntry{ PreviousEpoch: e.PreviousEpoch.Copy(), CurrentEpoch: *e.CurrentEpoch.Copy(), @@ -335,6 +475,9 @@ func (e *EpochStateEntry) Copy() *EpochStateEntry { if e == nil { return nil } + + // Constructor is skipped since we're copying an already-valid object. + //nolint:structwrite return &EpochStateEntry{ MinEpochStateEntry: e.MinEpochStateEntry.Copy(), PreviousEpochSetup: e.PreviousEpochSetup, @@ -353,6 +496,8 @@ func (e *RichEpochStateEntry) Copy() *RichEpochStateEntry { if e == nil { return nil } + // Constructor is skipped since we're copying an already-valid object. 
+ //nolint:structwrite return &RichEpochStateEntry{ EpochStateEntry: e.EpochStateEntry.Copy(), CurrentEpochIdentityTable: e.CurrentEpochIdentityTable.Copy(), @@ -570,3 +715,8 @@ type PSKeyValueStoreData struct { Version uint64 Data []byte } + +func (d PSKeyValueStoreData) Equal(d2 *PSKeyValueStoreData) bool { + return d.Version == d2.Version && + slices.Equal(d.Data, d2.Data) +} diff --git a/model/flow/protocol_state_test.go b/model/flow/protocol_state_test.go index 3fdb3122231..a40f5b24f1d 100644 --- a/model/flow/protocol_state_test.go +++ b/model/flow/protocol_state_test.go @@ -2,9 +2,12 @@ package flow_test import ( "fmt" + "math/rand" "testing" + clone "github.com/huandu/go-clone/generic" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -50,8 +53,35 @@ func TestEpochProtocolStateEntry_EpochPhase(t *testing.T) { // TestNewRichProtocolStateEntry checks that NewRichEpochStateEntry creates valid identity tables depending on the state // of epoch which is derived from the protocol state entry. +// It checks for correct handling of both valid and invalid inputs, ensuring that the function +// correctly validates epoch service event consistency and presence. +// +// Valid Cases: +// +// 1. staking-root-protocol-state: +// - No previous epoch; current epoch is in staking phase. +// +// 2. staking-phase: +// - Previous and current epochs exist; no next epoch. +// +// 3. setup-phase: +// - Next epoch setup is present; next epoch commit is nil. +// +// 4. setup-after-spork: +// - First epoch after spork; no previous epoch; next epoch setup is present. +// +// 5. commit-phase: +// - Previous, current, and next epochs are fully populated. +// +// 6. commit-after-spork: +// - First epoch after spork; current and next epochs are committed. +// +// Invalid Cases: +// +// 7. invalid - epoch state is nil: +// - Verifies that constructor returns an error if EpochStateEntry is nil. func TestNewRichProtocolStateEntry(t *testing.T) { - // Conditions right after a spork: + // 1. Conditions right after a spork: // * no previous epoch exists from the perspective of the freshly-sporked protocol state // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist t.Run("staking-root-protocol-state", func(t *testing.T) { @@ -74,13 +104,15 @@ func TestNewRichProtocolStateEntry(t *testing.T) { EpochFallbackTriggered: false, } stateEntry, err := flow.NewEpochStateEntry( - minStateEntry, - nil, - nil, - setup, - currentEpochCommit, - nil, - nil, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minStateEntry, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + CurrentEpochSetup: setup, + CurrentEpochCommit: currentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseStaking, stateEntry.EpochPhase()) @@ -99,20 +131,22 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants") }) - // Common situation during the staking phase for epoch N+1 + // 2. 
Common situation during the staking phase for epoch N+1 // * we are currently in Epoch N // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist t.Run("staking-phase", func(t *testing.T) { stateEntryFixture := unittest.EpochStateFixture() epochStateEntry, err := flow.NewEpochStateEntry( - stateEntryFixture.MinEpochStateEntry, - stateEntryFixture.PreviousEpochSetup, - stateEntryFixture.PreviousEpochCommit, - stateEntryFixture.CurrentEpochSetup, - stateEntryFixture.CurrentEpochCommit, - nil, - nil, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseStaking, epochStateEntry.EpochPhase()) @@ -131,7 +165,7 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Nil(t, epochRichStateEntry.NextEpoch) }) - // Common situation during the epoch setup phase for epoch N+1 + // 3. Common situation during the epoch setup phase for epoch N+1 // * we are currently in Epoch N // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) // * network is currently in the setup phase for the next epoch, i.e. EpochSetup event (starting setup phase) has already been observed @@ -142,13 +176,15 @@ func TestNewRichProtocolStateEntry(t *testing.T) { }) stateEntry, err := flow.NewEpochStateEntry( - stateEntryFixture.MinEpochStateEntry, - stateEntryFixture.PreviousEpochSetup, - stateEntryFixture.PreviousEpochCommit, - stateEntryFixture.CurrentEpochSetup, - stateEntryFixture.CurrentEpochCommit, - stateEntryFixture.NextEpochSetup, - nil, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: nil, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseSetup, stateEntry.EpochPhase()) @@ -176,7 +212,7 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") }) - // Common situation during the epoch setup phase for first epoch after the spork + // 4. Common situation during the epoch setup phase for first epoch after the spork // * we are currently in Epoch N // * there is no previous epoch as we are in the first epoch after the spork // * network is currently in the setup phase for the next epoch, i.e. 
EpochSetup event (starting setup phase) has already been observed @@ -197,13 +233,15 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Nil(t, stateEntryFixture.PreviousEpochCommit) stateEntry, err := flow.NewEpochStateEntry( - stateEntryFixture.MinEpochStateEntry, - stateEntryFixture.PreviousEpochSetup, - stateEntryFixture.PreviousEpochCommit, - stateEntryFixture.CurrentEpochSetup, - stateEntryFixture.CurrentEpochCommit, - stateEntryFixture.NextEpochSetup, - nil, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: nil, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseSetup, stateEntry.EpochPhase()) @@ -231,7 +269,7 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") }) - // Common situation during the epoch commit phase for epoch N+1 + // 5. Common situation during the epoch commit phase for epoch N+1 // * we are currently in Epoch N // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) // * The network has completed the epoch commit phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. @@ -239,13 +277,15 @@ func TestNewRichProtocolStateEntry(t *testing.T) { stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) stateEntry, err := flow.NewEpochStateEntry( - stateEntryFixture.MinEpochStateEntry, - stateEntryFixture.PreviousEpochSetup, - stateEntryFixture.PreviousEpochCommit, - stateEntryFixture.CurrentEpochSetup, - stateEntryFixture.CurrentEpochCommit, - stateEntryFixture.NextEpochSetup, - stateEntryFixture.NextEpochCommit, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseCommitted, stateEntry.EpochPhase()) @@ -272,7 +312,7 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") }) - // Common situation during the epoch commit phase for first epoch after the spork + // 6. Common situation during the epoch commit phase for first epoch after the spork // * we are currently in Epoch N // * there is no previous epoch as we are in the first epoch after the spork // * The network has completed the epoch commit phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. 
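
A brief side note on the `header.go` hunk earlier in this diff: callers of the renamed `ParentQC` accessor are expected to guard it with `ContainsParentQC`, since `ParentQC` panics on spork-root and genesis headers. A minimal sketch of that guard, assuming a hypothetical `example` package and `parentQCOrNil` helper:

```go
package example // hypothetical illustration package, not part of the patch

import "github.com/onflow/flow-go/model/flow"

// parentQCOrNil demonstrates the caller-side guard required by Header.ParentQC:
// check ContainsParentQC first, because ParentQC panics when the header carries
// no parent QC (only spork root or genesis blocks lack one).
func parentQCOrNil(header *flow.Header) *flow.QuorumCertificate {
	if !header.ContainsParentQC() {
		return nil // no parent QC available on this header
	}
	return header.ParentQC()
}
```
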
@@ -289,13 +329,15 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.Nil(t, stateEntryFixture.PreviousEpochCommit) stateEntry, err := flow.NewEpochStateEntry( - stateEntryFixture.MinEpochStateEntry, - stateEntryFixture.PreviousEpochSetup, - stateEntryFixture.PreviousEpochCommit, - stateEntryFixture.CurrentEpochSetup, - stateEntryFixture.CurrentEpochCommit, - stateEntryFixture.NextEpochSetup, - stateEntryFixture.NextEpochCommit, + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, ) assert.NoError(t, err) assert.Equal(t, flow.EpochPhaseCommitted, stateEntry.EpochPhase()) @@ -321,6 +363,348 @@ func TestNewRichProtocolStateEntry(t *testing.T) { assert.NoError(t, err) assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") }) + + // 7. Invalid: epochState is nil + t.Run("invalid - epoch state is nil", func(t *testing.T) { + _, err := flow.NewRichEpochStateEntry(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "epoch state must not be nil") + }) +} + +// TestNewEpochStateEntry tests the NewEpochStateEntry constructor with various valid and invalid inputs. +// +// Valid Cases: +// +// 1. Valid input with all fields populated: +// - Should successfully create an EpochStateEntry without error. +// +// 2. Valid input without NextEpochProtocolState: +// - Should successfully create an EpochStateEntry even if next epoch protocol state is not set. +// +// Invalid Cases: +// +// 3. PreviousEpoch.SetupID mismatch with PreviousEpochSetup.ID: +// - Should return an error for mismatched setup commitment. +// +// 4. PreviousEpoch.CommitID mismatch with PreviousEpochCommit.ID: +// - Should return an error for mismatched commit commitment. +// +// 5. PreviousEpoch is nil but PreviousEpochSetup is non-nil: +// - Should return an error for unexpected previous epoch's setup event. +// +// 6. PreviousEpoch is nil but PreviousEpochCommit is non-nil: +// - Should return an error for unexpected previous epoch's commit event. +// +// 7. CurrentEpoch.SetupID mismatch with CurrentEpochSetup.ID: +// - Should return an error for mismatched current epoch's setup event. +// +// 8. CurrentEpoch.CommitID mismatch with CurrentEpochCommit.ID: +// - Should return an error for mismatched current epoch's commit event. +// +// 9. NextEpoch is nil but NextEpochSetup is non-nil: +// - Should return an error for unexpected next epoch's setup event. +// +// 10. NextEpoch is nil but NextEpochCommit is non-nil: +// - Should return an error for unexpected next epoch's commit event. +// +// 11. NextEpoch.SetupID is non-zero but mismatches NextEpochSetup.ID: +// - Should return an error for next epoch's mismatched setup event. +// +// 12. NextEpoch.CommitID is non-zero but mismatches NextEpochCommit.ID: +// - Should return an error for mismatched next epoch's commit event. +// +// 13. NextEpoch.CommitID is zero but NextEpochCommit is non-nil: +// - Should return an error for unexpected commit event. +func TestNewEpochStateEntry(t *testing.T) { + // 1. 
Valid input with all fields + t.Run("valid input with all fields", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + entry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 2. Valid input without NextEpochProtocolState + t.Run("valid input without NextEpochProtocolState", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture() + entry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 3. Invalid: PreviousEpoch is set, but PreviousEpochSetup is nil + t.Run("invalid - previous epoch set but no setup event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpochSetup = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied previous epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.PreviousEpochSetup.ID(), + stateEntryFixture.MinEpochStateEntry.PreviousEpoch.SetupID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 4. 
Invalid: PreviousEpoch.CommitID doesn't match PreviousEpochCommit.ID() + t.Run("invalid - previous commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch.CommitID = flow.ZeroID // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied previous epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.PreviousEpochCommit.ID(), + stateEntryFixture.PreviousEpoch.CommitID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 5. Invalid: PreviousEpoch is nil, but PreviousEpochSetup is non-nil + t.Run("invalid - nil previous epoch but has setup event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no previous epoch but gotten non-nil EpochSetup event") + }) + + // 6. Invalid: PreviousEpoch is nil, but PreviousEpochCommit is non-nil + t.Run("invalid - nil previous epoch but has commit event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + entry.PreviousEpochSetup = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no previous epoch but gotten non-nil EpochCommit event") + }) + + // 7. 
Invalid: CurrentEpoch.SetupID doesn't match CurrentEpochSetup.ID() + t.Run("invalid - current setup ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.CurrentEpoch.SetupID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied current epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.CurrentEpochSetup.ID(), + stateEntryFixture.CurrentEpoch.SetupID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 8. Invalid: CurrentEpoch.CommitID doesn't match CurrentEpochCommit.ID() + t.Run("invalid - current commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.CurrentEpoch.CommitID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied current epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.CurrentEpochCommit.ID(), + stateEntryFixture.CurrentEpoch.CommitID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 9. Invalid: NextEpoch is nil, but NextEpochSetup is non-nil + t.Run("invalid - nil next epoch but has setup event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no next epoch but gotten non-nil EpochSetup event") + }) + + // 10. 
Invalid: NextEpoch is nil, but NextEpochCommit is non-nil + t.Run("invalid - nil next epoch but has commit event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch = nil + entry.NextEpochSetup = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no next epoch but gotten non-nil EpochCommit") + }) + + // 11. Invalid: NextEpoch.SetupID ≠ NextEpochSetup.ID + t.Run("invalid - next commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.SetupID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied next epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.NextEpoch.SetupID, + stateEntryFixture.NextEpochSetup.ID(), + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 12. Invalid: NextEpoch.CommitID ≠ ZeroID, but NextEpochCommit.ID doesn't match + t.Run("invalid - next commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.CommitID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied next epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.NextEpoch.CommitID, + stateEntryFixture.NextEpochCommit.ID(), + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 13. 
Invalid: NextEpoch.CommitID == ZeroID, but NextEpochCommit is non-nil + t.Run("invalid - uncommitted next epoch but has commit event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.CommitID = flow.ZeroID + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := "next epoch not yet committed but got EpochCommit event" + require.Contains(t, err.Error(), expectedMsg) + }) } // TestProtocolStateEntry_Copy tests if the copy method returns a deep copy of the entry. @@ -509,3 +893,397 @@ func TestBuildIdentityTable(t *testing.T) { assert.Empty(t, identityList) }) } + +// TestNewEpochStateContainer tests the NewEpochStateContainer constructor with valid and invalid inputs. +// +// Valid Cases: +// +// 1. Valid input with all fields: +// - Should successfully construct an EpochStateContainer. +// +// 2. Valid input with zero CommitID and nil EpochExtensions: +// - Should successfully construct an EpochStateContainer. +// +// Invalid Cases: +// +// 3. Invalid input with zero SetupID: +// - Should return an error indicating SetupID must not be zero. +// +// 4. Invalid input with nil ActiveIdentities: +// - Should return an error indicating ActiveIdentities must not be nil. +// +// 5. Invalid input with unsorted ActiveIdentities: +// - Should return an error indicating ActiveIdentities are not sorted. +func TestNewEpochStateContainer(t *testing.T) { + identities := unittest.DynamicIdentityEntryListFixture(3) + sortedIdentities := identities.Sort(flow.IdentifierCanonical) + + // Copy and shuffle to ensure it's unsorted + unsortedIdentities := sortedIdentities.Copy() + unsortedIdentities[0], unsortedIdentities[1] = unsortedIdentities[1], unsortedIdentities[0] + + // 1. Valid input with all fields + t.Run("valid input with all fields", func(t *testing.T) { + container, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: sortedIdentities, + EpochExtensions: []flow.EpochExtension{ + {FirstView: 100, FinalView: 200}, + }, + }, + ) + + require.NoError(t, err) + require.NotNil(t, container) + }) + + // 2. Valid input with zero CommitID and nil EpochExtensions + t.Run("valid input with zero CommitID and nil EpochExtensions", func(t *testing.T) { + container, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: flow.ZeroID, + ActiveIdentities: sortedIdentities, + EpochExtensions: nil, + }, + ) + + require.NoError(t, err) + require.NotNil(t, container) + }) + + // 3. Invalid input with zero SetupID + t.Run("invalid - zero SetupID", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: flow.ZeroID, + ActiveIdentities: sortedIdentities, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "SetupID must not be zero") + }) + + // 4. 
Invalid input with nil ActiveIdentities + t.Run("invalid - nil ActiveIdentities", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "ActiveIdentities must not be nil") + }) + + // 5. Invalid input with unsorted ActiveIdentities + t.Run("invalid - unsorted ActiveIdentities", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + ActiveIdentities: unsortedIdentities, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "ActiveIdentities are not sorted") + }) +} + +// TestNewMinEpochStateEntry validates the behavior of the NewMinEpochStateEntry constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input with all fields: +// - Ensures that providing a valid current epoch and optional previous/next epochs creates a MinEpochStateEntry. +// +// 2. Valid input with nil PreviousEpoch and NextEpoch: +// - Ensures that entry construction still succeeds with only CurrentEpoch. +// +// 3. Invalid input: empty CurrentEpoch: +// - Verifies that constructor returns an error if CurrentEpoch is not populated. +func TestNewMinEpochStateEntry(t *testing.T) { + identities := unittest.DynamicIdentityEntryListFixture(3) + + currentEpoch := flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + previousEpoch := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + nextEpoch := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + // 1. Valid input with all fields + t.Run("valid input with all fields", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: previousEpoch, + CurrentEpoch: currentEpoch, + NextEpoch: nextEpoch, + EpochFallbackTriggered: true, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 2. Valid input with nil PreviousEpoch and NextEpoch + t.Run("valid input with nil PreviousEpoch and NextEpoch", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: currentEpoch, + NextEpoch: nil, + EpochFallbackTriggered: false, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 3. Invalid input: empty CurrentEpoch + t.Run("empty CurrentEpoch", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: flow.EpochStateContainer{}, // Empty + NextEpoch: nil, + EpochFallbackTriggered: false, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.Error(t, err) + require.Nil(t, entry) + require.Contains(t, err.Error(), "current epoch must not be empty") + }) +} + +// TestEpochStateContainer_EqualTo verifies the correctness of the EqualTo method on EpochStateContainer. +// It checks that containers are considered equal if and only if all fields match. 
+func TestEpochStateContainer_EqualTo(t *testing.T) { + // Create two containers with different values + identities1 := unittest.DynamicIdentityEntryListFixture(3) + identities2 := unittest.DynamicIdentityEntryListFixture(3) + + c1 := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities1, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 201, + FinalView: 300, + }, + }, + } + + c2 := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities2, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 301, + FinalView: 400, + }, + }, + } + + require.False(t, c1.EqualTo(c2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to apply to c1 to gradually make it equal to c2 + mutations := []func(){ + func() { + c1.SetupID = c2.SetupID + }, + func() { + c1.CommitID = c2.CommitID + }, + func() { + c1.ActiveIdentities = clone.Clone(c2.ActiveIdentities) + }, + func() { + c1.EpochExtensions = clone.Clone(c2.EpochExtensions) + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the containers should still not be equal. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, c1.EqualTo(c2)) + } + + // Final mutation should make the containers fully equal. + mutations[len(mutations)-1]() + require.True(t, c1.EqualTo(c2)) +} + +// TestEpochStateContainer_EqualTo_Nil verifies the behavior of the EqualTo method on EpochStateContainer when either +// or both the receiver and the function input are nil. +func TestEpochStateContainer_EqualTo_Nil(t *testing.T) { + var nilContainer *flow.EpochStateContainer + nonNil := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: unittest.DynamicIdentityEntryListFixture(3), + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 201, + FinalView: 300, + }, + }, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilContainer.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilContainer)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilContainer.EqualTo(nil)) + }) +} + +// TestEpochExtension_EqualTo verifies the correctness of the EqualTo method on EpochExtension. +// It checks that EpochExtensions are considered equal if and only if all fields match. +func TestEpochExtension_EqualTo(t *testing.T) { + // Create two extensions with different values + ext1 := &flow.EpochExtension{ + FirstView: 100, + FinalView: 200, + } + ext2 := &flow.EpochExtension{ + FirstView: 300, + FinalView: 400, + } + + require.False(t, ext1.EqualTo(ext2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to apply to ext1 to gradually make it equal to ext2 + mutations := []func(){ + func() { + ext1.FirstView = ext2.FirstView + }, + func() { + ext1.FinalView = ext2.FinalView + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the extensions should still not be equal. 
+ for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, ext1.EqualTo(ext2)) + } + + // Final mutation should make the extensions fully equal. + mutations[len(mutations)-1]() + require.True(t, ext1.EqualTo(ext2)) +} + +// TestEpochExtension_EqualTo_Nil verifies the behavior of the EqualTo method on EpochExtension when either +// or both the receiver and the function input are nil. +func TestEpochExtension_EqualTo_Nil(t *testing.T) { + var nilExt *flow.EpochExtension + nonNil := &flow.EpochExtension{ + FirstView: 1, + FinalView: 2, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilExt.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilExt)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilExt.EqualTo(nil)) + }) +} + +// TestDynamicIdentityEntry_EqualTo verifies the correctness of the EqualTo method on DynamicIdentityEntry. +// It checks that DynamicIdentityEntries are considered equal if and only if all fields match. +func TestDynamicIdentityEntry_EqualTo(t *testing.T) { + entry1 := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: false, + } + entry2 := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: true, + } + + require.False(t, entry1.EqualTo(entry2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to gradually make entry1 equal to entry2 + mutations := []func(){ + func() { + entry1.NodeID = entry2.NodeID + }, + func() { + entry1.Ejected = entry2.Ejected + }, + } + + // Shuffle mutation order + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, entry1.EqualTo(entry2)) + } + + // Final mutation: should now be equal + mutations[len(mutations)-1]() + require.True(t, entry1.EqualTo(entry2)) +} + +// TestDynamicIdentityEntry_EqualTo_Nil verifies the behavior of EqualTo on DynamicIdentityEntry when one or both inputs are nil. +func TestDynamicIdentityEntry_EqualTo_Nil(t *testing.T) { + var nilEntry *flow.DynamicIdentityEntry + nonNil := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: false, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilEntry.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilEntry)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilEntry.EqualTo(nil)) + }) +} diff --git a/model/flow/quorum_certificate.go b/model/flow/quorum_certificate.go index 3fac30f4cf1..ca014fe60ec 100644 --- a/model/flow/quorum_certificate.go +++ b/model/flow/quorum_certificate.go @@ -1,8 +1,12 @@ package flow +import "fmt" + // QuorumCertificate represents a quorum certificate for a block proposal as defined in the HotStuff algorithm. // A quorum certificate is a collection of votes for a particular block proposal. Valid quorum certificates contain // signatures from a super-majority of consensus committee members. +// +//structwrite:immutable - mutations allowed only within the constructor type QuorumCertificate struct { View uint64 BlockID Identifier @@ -22,6 +26,42 @@ type QuorumCertificate struct { SigData []byte } +// UntrustedQuorumCertificate is an untrusted input-only representation of a QuorumCertificate, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedQuorumCertificate should be validated and converted into +// a trusted QuorumCertificate using NewQuorumCertificate constructor. +type UntrustedQuorumCertificate QuorumCertificate + +// NewQuorumCertificate creates a new instance of QuorumCertificate. +// Construction of QuorumCertificate is allowed only within the constructor +// +// All errors indicate a valid QuorumCertificate cannot be constructed from the input. +func NewQuorumCertificate(untrusted UntrustedQuorumCertificate) (*QuorumCertificate, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if len(untrusted.SignerIndices) == 0 { + return nil, fmt.Errorf("SignerIndices must not be empty") + } + + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("SigData must not be empty") + } + + return &QuorumCertificate{ + View: untrusted.View, + BlockID: untrusted.BlockID, + SignerIndices: untrusted.SignerIndices, + SigData: untrusted.SigData, + }, nil +} + // ID returns the QuorumCertificate's identifier func (qc *QuorumCertificate) ID() Identifier { if qc == nil { diff --git a/model/flow/quorum_certificate_test.go b/model/flow/quorum_certificate_test.go new file mode 100644 index 00000000000..514ddaf6f40 --- /dev/null +++ b/model/flow/quorum_certificate_test.go @@ -0,0 +1,104 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewQuorumCertificate verifies the behavior of the NewQuorumCertificate constructor. +// Test Cases: +// +// 1. Valid input: +// - Ensures a QuorumCertificate is returned when all fields are populated. +// +// 2. Missing BlockID: +// - Ensures an error is returned when BlockID is ZeroID. +// +// 3. Nil SignerIndices: +// - Ensures an error is returned when SignerIndices is nil. +// +// 4. Empty SignerIndices slice: +// - Ensures an error is returned when SignerIndices is empty. +// +// 5. Nil SigData: +// - Ensures an error is returned when SigData is nil. +// +// 6. Empty SigData slice: +// - Ensures an error is returned when SigData is empty. 
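// Editor's aside (illustrative sketch, not part of this diff): with the NewQuorumCertificate
// constructor added above, call sites convert untrusted input into a trusted
// QuorumCertificate explicitly, for example when decoding a message received from the
// network. The variables below are placeholders.
//
//	qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{
//		View:          view,          // any view value is accepted
//		BlockID:       blockID,       // rejected if flow.ZeroID
//		SignerIndices: signerIndices, // rejected if empty
//		SigData:       sigData,       // rejected if empty
//	})
//	if err != nil {
//		return nil, fmt.Errorf("invalid quorum certificate: %w", err)
//	}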
+func TestNewQuorumCertificate(t *testing.T) { + view := uint64(10) + blockID := unittest.IdentifierFixture() + signerIndices := []byte{0x01, 0x02} + sigData := []byte{0x03, 0x04} + + base := flow.UntrustedQuorumCertificate{ + View: view, + BlockID: blockID, + SignerIndices: signerIndices, + SigData: sigData, + } + + t.Run("valid input", func(t *testing.T) { + qc, err := flow.NewQuorumCertificate(base) + assert.NoError(t, err) + assert.NotNil(t, qc) + assert.Equal(t, view, qc.View) + assert.Equal(t, blockID, qc.BlockID) + assert.Equal(t, signerIndices, qc.SignerIndices) + assert.Equal(t, sigData, qc.SigData) + }) + + t.Run("missing BlockID", func(t *testing.T) { + u := base + u.BlockID = flow.ZeroID + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("nil SignerIndices", func(t *testing.T) { + u := base + u.SignerIndices = nil + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SignerIndices") + }) + + t.Run("empty SignerIndices slice", func(t *testing.T) { + u := base + u.SignerIndices = []byte{} + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SignerIndices") + }) + + t.Run("nil SigData", func(t *testing.T) { + u := base + u.SigData = nil + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SigData") + }) + + t.Run("empty SigData slice", func(t *testing.T) { + u := base + u.SigData = []byte{} + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SigData") + }) +} diff --git a/model/flow/resultApproval.go b/model/flow/resultApproval.go index 5b096a5a936..b68aa8643b8 100644 --- a/model/flow/resultApproval.go +++ b/model/flow/resultApproval.go @@ -1,22 +1,60 @@ package flow import ( + "fmt" + "github.com/onflow/crypto" ) // Attestation confirms correctness of a chunk of an exec result +// +//structwrite:immutable - mutations allowed only within the constructor type Attestation struct { BlockID Identifier // ID of the block included the collection ExecutionResultID Identifier // ID of the execution result ChunkIndex uint64 // index of the approved chunk } +// UntrustedAttestation is an untrusted input-only representation of an Attestation, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedAttestation should be validated and converted into +// a trusted Attestation using NewAttestation constructor. +type UntrustedAttestation Attestation + +// NewAttestation creates a new instance of Attestation. +// Construction Attestation allowed only within the constructor. +// +// All errors indicate a valid Attestation cannot be constructed from the input. +// ChunkIndex can be zero in principle, so we don’t check it. 
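// Editor's aside (illustrative sketch, not part of this diff): Attestation is the
// innermost layer of a result approval. The constructors below compose: NewResultApproval
// delegates to NewResultApprovalBody, which in turn delegates to NewAttestation, so a
// single call from outside the flow package validates the whole structure. The variables
// below are placeholders.
//
//	ra, err := flow.NewResultApproval(flow.UntrustedResultApproval{
//		Body: flow.ResultApprovalBody{
//			Attestation: flow.Attestation{
//				BlockID:           blockID,
//				ExecutionResultID: resultID,
//				ChunkIndex:        0, // zero is a valid chunk index
//			},
//			ApproverID:           approverID,
//			AttestationSignature: attestationSig,
//			Spock:                spockProof,
//		},
//		VerifierSignature: verifierSig,
//	})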
+func NewAttestation(untrusted UntrustedAttestation) (*Attestation, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if untrusted.ExecutionResultID == ZeroID { + return nil, fmt.Errorf("ExecutionResultID must not be empty") + } + + return &Attestation{ + BlockID: untrusted.BlockID, + ExecutionResultID: untrusted.ExecutionResultID, + ChunkIndex: untrusted.ChunkIndex, + }, nil +} + // ID generates a unique identifier using attestation func (a Attestation) ID() Identifier { return MakeID(a) } // ResultApprovalBody holds body part of a result approval +// +//structwrite:immutable - mutations allowed only within the constructor type ResultApprovalBody struct { Attestation ApproverID Identifier // node id generating this result approval @@ -24,6 +62,47 @@ type ResultApprovalBody struct { Spock crypto.Signature // proof of re-computation, one per each chunk } +// UntrustedResultApprovalBody is an untrusted input-only representation of an ResultApprovalBody, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedResultApprovalBody should be validated and converted into +// a trusted ResultApprovalBody using NewResultApprovalBody constructor. +type UntrustedResultApprovalBody ResultApprovalBody + +// NewResultApprovalBody creates a new instance of ResultApprovalBody. +// Construction ResultApprovalBody allowed only within the constructor. +// +// All errors indicate a valid Collection cannot be constructed from the input. +func NewResultApprovalBody(untrusted UntrustedResultApprovalBody) (*ResultApprovalBody, error) { + att, err := NewAttestation(UntrustedAttestation(untrusted.Attestation)) + if err != nil { + return nil, fmt.Errorf("invalid attestation: %w", err) + } + + if untrusted.ApproverID == ZeroID { + return nil, fmt.Errorf("ApproverID must not be empty") + } + + if len(untrusted.AttestationSignature) == 0 { + return nil, fmt.Errorf("AttestationSignature must not be empty") + } + + if len(untrusted.Spock) == 0 { + return nil, fmt.Errorf("Spock proof must not be empty") + } + + return &ResultApprovalBody{ + Attestation: *att, + ApproverID: untrusted.ApproverID, + AttestationSignature: untrusted.AttestationSignature, + Spock: untrusted.Spock, + }, nil +} + // PartialID generates a unique identifier using Attestation + ApproverID func (rab ResultApprovalBody) PartialID() Identifier { data := struct { @@ -43,11 +122,44 @@ func (rab ResultApprovalBody) ID() Identifier { } // ResultApproval includes an approval for a chunk, verified by a verification node +// +//structwrite:immutable - mutations allowed only within the constructor type ResultApproval struct { Body ResultApprovalBody VerifierSignature crypto.Signature // signature over all above fields } +// UntrustedResultApproval is an untrusted input-only representation of an ResultApproval, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedResultApproval should be validated and converted into +// a trusted ResultApproval using NewResultApproval constructor. +type UntrustedResultApproval ResultApproval + +// NewResultApproval creates a new instance of ResultApproval. 
+// Construction ResultApproval allowed only within the constructor. +// +// All errors indicate a valid Collection cannot be constructed from the input. +func NewResultApproval(untrusted UntrustedResultApproval) (*ResultApproval, error) { + rab, err := NewResultApprovalBody(UntrustedResultApprovalBody(untrusted.Body)) + if err != nil { + return nil, fmt.Errorf("invalid result approval body: %w", err) + } + + if len(untrusted.VerifierSignature) == 0 { + return nil, fmt.Errorf("VerifierSignature must not be empty") + } + + return &ResultApproval{ + Body: *rab, + VerifierSignature: untrusted.VerifierSignature, + }, nil +} + // ID generates a unique identifier using result approval body func (ra ResultApproval) ID() Identifier { return MakeID(ra.Body) diff --git a/model/flow/resultApproval_test.go b/model/flow/resultApproval_test.go index ee6b52ad922..7608faec457 100644 --- a/model/flow/resultApproval_test.go +++ b/model/flow/resultApproval_test.go @@ -3,12 +3,289 @@ package flow_test import ( "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) +const chunkIdx = uint64(7) + +// TestNewAttestation verifies that NewAttestation constructs a valid Attestation +// when given complete, non-zero fields, and returns an error when any required +// field is missing. +// It covers: +// - valid attestation creation +// - missing BlockID +// - missing ExecutionResultID +func TestNewAttestation(t *testing.T) { + + t.Run("valid attestation", func(t *testing.T) { + blockID := unittest.IdentifierFixture() + resultID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.NoError(t, err) + assert.NotNil(t, at) + assert.Equal(t, blockID, at.BlockID) + assert.Equal(t, resultID, at.ExecutionResultID) + assert.Equal(t, chunkIdx, at.ChunkIndex) + }) + + t.Run("missing BlockID", func(t *testing.T) { + resultID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: flow.ZeroID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.Error(t, err) + assert.Nil(t, at) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("missing ExecutionResultID", func(t *testing.T) { + blockID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: flow.ZeroID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.Error(t, err) + assert.Nil(t, at) + assert.Contains(t, err.Error(), "ExecutionResultID") + }) +} + +// TestNewResultApprovalBody checks that NewResultApprovalBody builds a valid +// ResultApprovalBody when given a correct Attestation and non-empty +// fields, and returns errors for invalid nested Attestation or missing fields. 
+// It covers: +// - valid result approval body creation +// - invalid nested Attestation +// - missing ApproverID +// - missing AttestationSignature +// - missing Spock proof +func TestNewResultApprovalBody(t *testing.T) { + blockID := unittest.IdentifierFixture() + resultID := unittest.IdentifierFixture() + approver := unittest.IdentifierFixture() + attestSig := unittest.SignatureFixture() + spockSig := unittest.SignatureFixture() + + t.Run("valid result approval body", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.NoError(t, err) + assert.NotNil(t, rab) + assert.Equal(t, *att, rab.Attestation) + assert.Equal(t, approver, rab.ApproverID) + assert.Equal(t, attestSig, rab.AttestationSignature) + assert.Equal(t, spockSig, rab.Spock) + }) + + t.Run("invalid attestation", func(t *testing.T) { + uc := flow.UntrustedResultApprovalBody{ + Attestation: flow.Attestation{ + BlockID: flow.ZeroID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "attestation") + }) + + t.Run("empty ApproverID", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: flow.ZeroID, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "ApproverID") + }) + + t.Run("empty AttestationSignature", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: crypto.Signature{}, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "AttestationSignature") + }) + + t.Run("empty Spock proof", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: crypto.Signature{}, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "Spock") + }) +} + +// TestNewResultApproval ensures NewResultApproval combines a valid +// ResultApprovalBody and VerifierSignature into a ResultApproval, and returns +// errors for invalid ResultApprovalBody or missing VerifierSignature. 
+// It covers: +// - valid result approval creation +// - invalid ResultApprovalBody +// - missing verifier signature +func TestNewResultApproval(t *testing.T) { + blockID := unittest.IdentifierFixture() + execResID := unittest.IdentifierFixture() + approver := unittest.IdentifierFixture() + attestSig := unittest.SignatureFixture() + spockSig := unittest.SignatureFixture() + verifierSig := unittest.SignatureFixture() + + t.Run("valid result approval", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: execResID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + rab, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }) + assert.NoError(t, err) + + uv := flow.UntrustedResultApproval{ + Body: *rab, + VerifierSignature: verifierSig, + } + + ra, err := flow.NewResultApproval(uv) + assert.NoError(t, err) + assert.NotNil(t, ra) + assert.Equal(t, *rab, ra.Body) + assert.Equal(t, verifierSig, ra.VerifierSignature) + }) + + // An invalid ResultApprovalBody must cause NewResultApproval to error + t.Run("invalid body", func(t *testing.T) { + uv := flow.UntrustedResultApproval{ + Body: flow.ResultApprovalBody{ + Attestation: flow.Attestation{ + BlockID: flow.ZeroID, + ExecutionResultID: execResID, + ChunkIndex: chunkIdx, + }, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }, + VerifierSignature: verifierSig, + } + + ra, err := flow.NewResultApproval(uv) + assert.Error(t, err) + assert.Nil(t, ra) + assert.Contains(t, err.Error(), "invalid result approval body") + }) + + // Missing VerifierSignature must cause NewResultApproval to error + t.Run("empty verifier signature", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: execResID, + ChunkIndex: 3, + }) + assert.NoError(t, err) + + rab, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }) + assert.NoError(t, err) + + uv := flow.UntrustedResultApproval{ + Body: *rab, + VerifierSignature: crypto.Signature{}, + } + + ra, err := flow.NewResultApproval(uv) + assert.Error(t, err) + assert.Nil(t, ra) + assert.Contains(t, err.Error(), "VerifierSignature") + }) +} + func TestResultApprovalEncode(t *testing.T) { ra := unittest.ResultApprovalFixture() id := ra.ID() diff --git a/model/flow/seal.go b/model/flow/seal.go index 300dea2b79f..d7baf8a2ed3 100644 --- a/model/flow/seal.go +++ b/model/flow/seal.go @@ -1,6 +1,9 @@ package flow -import "encoding/json" +import ( + "encoding/json" + "fmt" +) // A Seal is produced when an Execution Result (referenced by `ResultID`) for // particular block (referenced by `BlockID`) is committed into the chain. @@ -34,13 +37,49 @@ import "encoding/json" // Therefore, to retrieve valid blocks from storage, it is required that // the Seal.ID includes all fields with independent degrees of freedom // (such as AggregatedApprovalSigs). +// +//structwrite:immutable - mutations allowed only within the constructor type Seal struct { - BlockID Identifier - ResultID Identifier - FinalState StateCommitment + BlockID Identifier + ResultID Identifier + FinalState StateCommitment + // AggregatedApprovalSigs can be nil/empty when verification is disabled or for the root seal. 
AggregatedApprovalSigs []AggregatedSignature // one AggregatedSignature per chunk } +// UntrustedSeal is an untrusted input-only representation of a Seal, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedSeal should be validated and converted into +// a trusted Seal using NewSeal constructor. +type UntrustedSeal Seal + +// NewSeal creates a new instance of Seal. +// Construction Seal allowed only within the constructor. +// +// All errors indicate a valid Seal cannot be constructed from the input. +func NewSeal(untrusted UntrustedSeal) (*Seal, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("block ID must not be zero") + } + if untrusted.ResultID == ZeroID { + return nil, fmt.Errorf("result ID must not be zero") + } + if untrusted.FinalState == EmptyStateCommitment { + return nil, fmt.Errorf("final state must not be empty") + } + return &Seal{ + BlockID: untrusted.BlockID, + ResultID: untrusted.ResultID, + FinalState: untrusted.FinalState, + AggregatedApprovalSigs: untrusted.AggregatedApprovalSigs, + }, nil +} + func (s Seal) Body() interface{} { return struct { BlockID Identifier diff --git a/model/flow/seal_test.go b/model/flow/seal_test.go index a980192b323..97d6c41e1bc 100644 --- a/model/flow/seal_test.go +++ b/model/flow/seal_test.go @@ -4,7 +4,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,3 +26,76 @@ func Test_SealID(t *testing.T) { assert.NotEqual(t, id, seal.ID()) assert.NotEqual(t, cs, seal.Checksum()) } + +// TestNewSeal verifies the behavior of the NewSeal constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedSeal results in a valid Seal. +// +// 2. Invalid input with zero block ID: +// - Ensures an error is returned when the BlockID is zero. +// +// 3. Invalid input with zero result ID: +// - Ensures an error is returned when the ResultID is zero. +// +// 4. Invalid input with empty final state: +// - Ensures an error is returned when the FinalState is empty. 
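// Editor's aside (illustrative, not part of this diff): NewSeal rejects a zero BlockID,
// a zero ResultID, and an empty FinalState, but accepts a nil or empty
// AggregatedApprovalSigs (per the field comment above: verification may be disabled,
// and the root seal carries no approvals). Placeholder sketch:
//
//	seal, err := flow.NewSeal(flow.UntrustedSeal{
//		BlockID:    blockID,
//		ResultID:   resultID,
//		FinalState: finalState,
//		// AggregatedApprovalSigs deliberately omitted: empty is allowed
//	})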
+func TestNewSeal(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: unittest.IdentifierFixture(), + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.NoError(t, err) + require.NotNil(t, seal) + }) + + t.Run("invalid input, block ID is zero", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: flow.ZeroID, + ResultID: unittest.IdentifierFixture(), + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "block ID must not be zero") + }) + + t.Run("invalid input, result ID is zero", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: flow.ZeroID, + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "result ID must not be zero") + }) + + t.Run("invalid input, final state is empty", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: unittest.IdentifierFixture(), + FinalState: flow.EmptyStateCommitment, + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "final state must not be empty") + }) +} diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index b86a1889507..002aa9548e9 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -1,6 +1,7 @@ package flow import ( + "bytes" "errors" "fmt" @@ -54,10 +55,11 @@ type SealingSegment struct { // the lowest one, are contained in the blocks of the sealing segment. LatestSeals map[Identifier]Identifier - // FirstSeal contains the latest seal as of the first block in the segment. - // Per convention, this field holds a seal that was included _prior_ to the - // first block of the sealing segment. If the first block in the segment - // contains a seal, then this field is `nil`. + // FirstSeal contains the latest seal in the fork as of the first block in the + // segment, i.e. `Blocks[0]`. Per convention, this field is `nil` if and only if + // the first block in the segment contains a seal. In other words, `FirstSeal` + // holding a non-nil value indicates that `Blocks[0]` did not seal any blocks, + // i.e. the latest sealed block as of `Blocks[0]` is carried over from an ancestor. // This information is needed for the `Commit` method of protocol snapshot // to return the sealed state, when the first block contains no seal. FirstSeal *Seal @@ -291,12 +293,6 @@ func (builder *SealingSegmentBuilder) AddBlock(block *Block) error { } blockID := block.ID() - // a block might contain receipts or seals that refer to results that are included in blocks - // whose height is below the first block of the segment. 
- // In order to include those missing results into the segment, we construct a list of those - // missing result IDs referenced by this block - missingResultIDs := make(map[Identifier]struct{}) - // for the first (lowest) block, if it contains no seal, store the latest // seal incorporated prior to the first block if len(builder.blocks) == 0 { @@ -306,8 +302,6 @@ func (builder *SealingSegmentBuilder) AddBlock(block *Block) error { return fmt.Errorf("could not look up seal: %w", err) } builder.firstSeal = seal - // add first seal result ID here, since it isn't in payload - missingResultIDs[seal.ResultID] = struct{}{} } } @@ -324,28 +318,6 @@ func (builder *SealingSegmentBuilder) AddBlock(block *Block) error { builder.includedResults[result.ID()] = struct{}{} } - for _, receipt := range block.Payload.Receipts { - if _, ok := builder.includedResults[receipt.ResultID]; !ok { - missingResultIDs[receipt.ResultID] = struct{}{} - } - } - for _, seal := range block.Payload.Seals { - if _, ok := builder.includedResults[seal.ResultID]; !ok { - missingResultIDs[seal.ResultID] = struct{}{} - } - } - - // add the missing results - for resultID := range missingResultIDs { - result, err := builder.resultLookup(resultID) - - if err != nil { - return fmt.Errorf("could not look up result with id=%x: %w", resultID, err) - } - builder.addExecutionResult(result) - builder.includedResults[resultID] = struct{}{} - } - // if the block commits to an unseen ProtocolStateID, add the corresponding data entry err = builder.addProtocolStateEntryIfUnseen(block.Payload.ProtocolStateID) if err != nil { @@ -405,6 +377,7 @@ func (builder *SealingSegmentBuilder) AddExtraBlock(block *Block) error { // AddExecutionResult adds result to executionResults func (builder *SealingSegmentBuilder) addExecutionResult(result *ExecutionResult) { builder.results = append(builder.results, result) + builder.includedResults[result.ID()] = struct{}{} } // SealingSegment completes building the sealing segment, validating the segment @@ -413,6 +386,53 @@ func (builder *SealingSegmentBuilder) addExecutionResult(result *ExecutionResult // Errors expected during normal operation: // - InvalidSealingSegmentError if the added block would cause an invalid resulting segment func (builder *SealingSegmentBuilder) SealingSegment() (*SealingSegment, error) { + + // at this point, go through all blocks and store any results which are referenced + // by blocks in the segment, but not contained within any blocks in the segment + missingExecutionResultMap := make(map[Identifier]struct{}) + + if builder.firstSeal != nil { + _, ok := builder.includedResults[builder.firstSeal.ResultID] + if !ok { + missingExecutionResultMap[builder.firstSeal.ResultID] = struct{}{} + } + } + + for _, block := range append(builder.extraBlocks, builder.blocks...) 
{ + for _, receipt := range block.Payload.Receipts { + _, included := builder.includedResults[receipt.ResultID] + if included { + continue + } + missingExecutionResultMap[receipt.ResultID] = struct{}{} + } + for _, seal := range block.Payload.Seals { + _, included := builder.includedResults[seal.ResultID] + if included { + continue + } + missingExecutionResultMap[seal.ResultID] = struct{}{} + } + } + + // sort execution results to canonical order for consistent serialization + missingExecutionResults := make([]Identifier, 0, len(missingExecutionResultMap)) + for resultID := range missingExecutionResultMap { + missingExecutionResults = append(missingExecutionResults, resultID) + } + slices.SortFunc(missingExecutionResults, func(a, b Identifier) int { + return bytes.Compare(a[:], b[:]) + }) + + // retrieve and store all missing execution results + for _, resultID := range missingExecutionResults { + result, err := builder.resultLookup(resultID) + if err != nil { + return nil, fmt.Errorf("could not retrieve missing result (id=%x): %w", resultID, err) + } + builder.addExecutionResult(result) + } + if err := builder.validateSegment(); err != nil { return nil, fmt.Errorf("failed to validate sealing segment: %w", err) } diff --git a/model/flow/sealing_segment.md b/model/flow/sealing_segment.md index cc956754d85..0ccc0c9db60 100644 --- a/model/flow/sealing_segment.md +++ b/model/flow/sealing_segment.md @@ -144,9 +144,7 @@ the segment. In particular: ## Outlook -In its current state, the sealing segment has been evolving driven by different needs. Most likely, there is some room for simplifications -and other improvements. However, an important aspect of the sealing segment is to allow newly-joining nodes to build an internal representation -of the protocol state, in particular the identity table. There are large changes coming around when we move to the dynamic identity table. -Therefore, we accept that the Sealing Segment currently has some technical debt and unnecessary complexity. Once we have implemented the -dynamic identity table, we will have a much more solidified understanding of the data in the sealing segment. +An important aspect of the sealing segment is to allow newly-joining nodes to build an internal representation +of the protocol state, in particular the identity table. In its current state, the sealing segment has been evolving +driven by different needs. Most likely, there is some room for simplifications and other improvements. diff --git a/model/flow/sealing_segment_test.go b/model/flow/sealing_segment_test.go index c5f15b73ff8..cb8f62d57b6 100644 --- a/model/flow/sealing_segment_test.go +++ b/model/flow/sealing_segment_test.go @@ -544,6 +544,9 @@ func TestAddBlock_StorageError(t *testing.T) { )) err := builder.AddBlock(&block1) + require.NoError(t, err) + + _, err = builder.SealingSegment() require.ErrorIs(t, err, exception) }) diff --git a/model/flow/timeout_certificate.go b/model/flow/timeout_certificate.go index e7a4df39058..65425fd52ea 100644 --- a/model/flow/timeout_certificate.go +++ b/model/flow/timeout_certificate.go @@ -1,10 +1,16 @@ package flow -import "github.com/onflow/crypto" +import ( + "fmt" + + "github.com/onflow/crypto" +) // TimeoutCertificate proves that a super-majority of consensus participants want to abandon the specified View. // At its core, a timeout certificate is an aggregation of TimeoutObjects, which individual nodes send to signal // their intent to leave the active view. 
+// +//structwrite:immutable - mutations allowed only within the constructor type TimeoutCertificate struct { View uint64 // NewestQCViews lists for each signer (in the same order) the view of the newest QC they supplied @@ -21,6 +27,65 @@ type TimeoutCertificate struct { SigData crypto.Signature } +// UntrustedTimeoutCertificate is an untrusted input-only representation of a TimeoutCertificate, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedTimeoutCertificate should be validated and converted into +// a trusted TimeoutCertificate using NewTimeoutCertificate constructor. +type UntrustedTimeoutCertificate TimeoutCertificate + +// NewTimeoutCertificate creates a new instance of TimeoutCertificate. +// Construction TimeoutCertificate allowed only within the constructor. +// +// All errors indicate a valid TimeoutCertificate cannot be constructed from the input. +func NewTimeoutCertificate(untrusted UntrustedTimeoutCertificate) (*TimeoutCertificate, error) { + if untrusted.NewestQC == nil { + return nil, fmt.Errorf("newest QC must not be nil") + } + if len(untrusted.SignerIndices) == 0 { + return nil, fmt.Errorf("signer indices must not be empty") + } + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("signature must not be empty") + } + + // The TC's view cannot be smaller than the view of the QC it contains. + // Note: we specifically allow for the TC to have the same view as the highest QC. + // This is useful as a fallback, because it allows replicas other than the designated + // leader to also collect votes and generate a QC. + if untrusted.View < untrusted.NewestQC.View { + return nil, fmt.Errorf("TC's QC view (%d) cannot be newer than the TC's view (%d)", untrusted.NewestQC.View, untrusted.View) + } + + // verifying that tc.NewestQC is the QC with the highest view. + // Note: A byzantine TC could include `nil` for tc.NewestQCViews + if len(untrusted.NewestQCViews) == 0 { + return nil, fmt.Errorf("newest QC views must not be empty") + } + + newestQCView := untrusted.NewestQCViews[0] + for _, view := range untrusted.NewestQCViews { + if newestQCView < view { + newestQCView = view + } + } + if newestQCView > untrusted.NewestQC.View { + return nil, fmt.Errorf("included QC (view=%d) should be equal or higher to highest contributed view: %d", untrusted.NewestQC.View, newestQCView) + } + + return &TimeoutCertificate{ + View: untrusted.View, + NewestQCViews: untrusted.NewestQCViews, + NewestQC: untrusted.NewestQC, + SignerIndices: untrusted.SignerIndices, + SigData: untrusted.SigData, + }, nil +} + // ID returns the TimeoutCertificate's identifier func (t *TimeoutCertificate) ID() Identifier { if t == nil { diff --git a/model/flow/timeout_certificate_test.go b/model/flow/timeout_certificate_test.go new file mode 100644 index 00000000000..53ad0aeecea --- /dev/null +++ b/model/flow/timeout_certificate_test.go @@ -0,0 +1,156 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/consensus/hotstuff/helper" + "github.com/onflow/flow-go/model/flow" +) + +// TestNewTimeoutCertificate verifies the behavior of the NewTimeoutCertificate constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. 
Valid input: +// - Verifies that a properly populated UntrustedTimeoutCertificate results in a valid TimeoutCertificate. +// +// 2. Invalid input with nil NewestQC: +// - Ensures an error is returned when the NewestQC field is nil. +// +// 3. Invalid input with nil SignerIndices: +// - Ensures an error is returned when the SignerIndices field is nil. +// +// 4. Invalid input with empty SignerIndices: +// - Ensures an error is returned when the SignerIndices field is an empty slice. +// +// 5. Invalid input with nil SigData: +// - Ensures an error is returned when the SigData field is nil. +// +// 6. Invalid input with empty SigData: +// - Ensures an error is returned when the SigData field is an empty byte slice. +// +// 7. Invalid input with nil NewestQCViews: +// - Ensures an error is returned when the NewestQCViews field is nil. +// +// 8. Invalid input with empty NewestQCViews: +// - Ensures an error is returned when the NewestQCViews field is an empty slice. +// +// 9. Invalid input when the View is lower than NewestQC's View: +// - Ensures an error is returned when the TimeoutCertificate's View is less than the included QuorumCertificate's View. +// +// 10. Invalid input when NewestQCViews contains view higher than NewestQC.View: +// - Ensures an error is returned if NewestQCViews includes a view that exceeds the view of the NewestQC. +func TestNewTimeoutCertificate(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + validTC := helper.MakeTC() + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate(*validTC), + ) + require.NoError(t, err) + require.NotNil(t, tc) + }) + + t.Run("invalid input with nil NewestQC", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQC = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC must not be nil") + }) + + t.Run("invalid input with nil SignerIndices", func(t *testing.T) { + tc := helper.MakeTC() + tc.SignerIndices = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer indices must not be empty") + }) + + t.Run("invalid input with empty SignerIndices", func(t *testing.T) { + tc := helper.MakeTC() + tc.SignerIndices = []byte{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer indices must not be empty") + }) + + t.Run("invalid input with nil SigData", func(t *testing.T) { + tc := helper.MakeTC() + tc.SigData = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with empty SigData", func(t *testing.T) { + tc := helper.MakeTC() + tc.SigData = []byte{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with nil NewestQCViews", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQCViews = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC views must not be empty") + }) + + t.Run("invalid 
input with empty NewestQCViews", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQCViews = []uint64{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC views must not be empty") + }) + + t.Run("invalid input with TC.View < QC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(100)) + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate( + *helper.MakeTC( + helper.WithTCView(99), + helper.WithTCNewestQC(qc), + )), + ) + require.Error(t, err) + require.Nil(t, tc) + assert.Contains(t, err.Error(), "TC's QC view (100) cannot be newer than the TC's view (99)") + }) + + t.Run("invalid input when NewestQCViews has view higher than NewestQC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(50)) + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate( + *helper.MakeTC( + helper.WithTCView(51), + helper.WithTCNewestQC(qc), + helper.WithTCHighQCViews([]uint64{40, 50, 60}), // highest = 60 > QC.View = 50 + ), + ), + ) + require.Error(t, err) + require.Nil(t, tc) + assert.Contains(t, err.Error(), "included QC (view=50) should be equal or higher to highest contributed view: 60") + }) +} diff --git a/model/verification/chunkDataPackRequest.go b/model/verification/chunkDataPackRequest.go index f613750cbcb..52a257e982b 100644 --- a/model/verification/chunkDataPackRequest.go +++ b/model/verification/chunkDataPackRequest.go @@ -10,11 +10,51 @@ import ( // ChunkDataPackRequest is an internal data structure in fetcher engine that is passed between the engine // and requester module. It conveys required information for requesting a chunk data pack. +// +//structwrite:immutable - mutations allowed only within the constructor type ChunkDataPackRequest struct { chunks.Locator // uniquely identifies chunk ChunkDataPackRequestInfo } +// UntrustedChunkDataPackRequest is an untrusted input-only representation of a ChunkDataPackRequest, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedChunkDataPackRequest should be validated and converted into +// a trusted ChunkDataPackRequest using NewChunkDataPackRequest constructor. +type UntrustedChunkDataPackRequest ChunkDataPackRequest + +// NewChunkDataPackRequest creates a new instance of ChunkDataPackRequest. +// Construction ChunkDataPackRequest allowed only within the constructor. +// +// All errors indicate a valid ChunkDataPackRequest cannot be constructed from the input. 
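// Editor's note (aside, not part of this diff): in addition to the non-empty checks on
// Locator, ChunkID, Agrees, and Targets, the constructor below validates the Targets
// list by filtering it with filter.HasRole[flow.Identity](flow.RoleExecution) and
// comparing lengths; if any identity was filtered out, construction fails. In other
// words, a request may only target execution nodes, presumably because only they can
// serve chunk data packs.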
+func NewChunkDataPackRequest(untrusted UntrustedChunkDataPackRequest) (*ChunkDataPackRequest, error) { + if untrusted.Locator.EqualTo(new(chunks.Locator)) { + return nil, fmt.Errorf("locator is empty") + } + if untrusted.ChunkID == flow.ZeroID { + return nil, fmt.Errorf("chunk ID must not be zero") + } + if len(untrusted.Agrees) == 0 { + return nil, fmt.Errorf("agrees list must not be empty") + } + if len(untrusted.Targets) == 0 { + return nil, fmt.Errorf("targets list must not be empty") + } + filteredTargets := untrusted.Targets.Filter(filter.HasRole[flow.Identity](flow.RoleExecution)) + if len(filteredTargets) < len(untrusted.Targets) { + return nil, fmt.Errorf("only execution nodes identities must be provided in target list: %v", untrusted.Targets) + } + return &ChunkDataPackRequest{ + Locator: untrusted.Locator, + ChunkDataPackRequestInfo: untrusted.ChunkDataPackRequestInfo, + }, nil +} + type ChunkDataPackRequestInfo struct { ChunkID flow.Identifier Height uint64 // block height of execution result of the chunk, used to drop chunk requests of sealed heights. diff --git a/model/verification/chunkDataPackRequest_test.go b/model/verification/chunkDataPackRequest_test.go index 89e65c39824..f681f03383c 100644 --- a/model/verification/chunkDataPackRequest_test.go +++ b/model/verification/chunkDataPackRequest_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/utils/unittest" @@ -66,3 +67,144 @@ func TestChunkDataPackRequestList_UniqueRequestInfo(t *testing.T) { otherChunkIDReqInfo := reqInfoMap[otherChunkID] require.Equal(t, *otherChunkIDReqInfo, otherReq.ChunkDataPackRequestInfo) } + +// TestNewChunkDataPackRequest tests the NewChunkDataPackRequest constructor with valid and invalid inputs. +// +// Valid Case: +// +// 1. Valid input with non-empty locator, non-zero ChunkID, and all required lists: +// - Should successfully construct a ChunkDataPackRequest. +// +// Invalid Cases: +// +// 2. Invalid input with empty locator: +// - Should return an error indicating the locator is empty. +// +// 3. Invalid input with zero ChunkID: +// - Should return an error indicating chunk ID must not be zero. +// +// 4. Invalid input with empty and nil Agrees list: +// - Should return an error indicating agrees list must not be empty. +// +// 5. Invalid input with empty and nil Targets list: +// - Should return an error indicating targets list must not be empty. +// +// 6. Invalid input with non-execution node in Targets list: +// - Should return an error indicating only execution node identities are allowed in the Targets list. 
+func TestNewChunkDataPackRequest(t *testing.T) { + chunkDataPackRequestInfo := unittest.ChunkDataPackRequestInfoFixture() + locator := *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + + t.Run("valid input with all required fields", func(t *testing.T) { + request, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: *chunkDataPackRequestInfo, + }, + ) + + require.NoError(t, err) + require.NotNil(t, request) + }) + + t.Run("invalid input with empty locator", func(t *testing.T) { + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: chunks.Locator{}, + ChunkDataPackRequestInfo: *chunkDataPackRequestInfo, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "locator is empty") + }) + + t.Run("invalid input with zero chunk ID", func(t *testing.T) { + info := *chunkDataPackRequestInfo + info.ChunkID = flow.ZeroID + + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "chunk ID must not be zero") + }) + + t.Run("input with invalid agrees", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // with nil agrees + info.Agrees = nil + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "agrees list must not be empty") + + // with empty agrees + info.Agrees = flow.IdentifierList{} + _, err = verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "agrees list must not be empty") + }) + + t.Run("input with invalid targets", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // with nil targets + info.Targets = nil + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "targets list must not be empty") + + // with empty targets + info.Targets = flow.IdentityList{} + _, err = verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "targets list must not be empty") + }) + + t.Run("invalid input with non-execution node in targets list", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // Append a non-execution identity + info.Targets = append(info.Targets, unittest.IdentityFixture( + unittest.WithNodeID(unittest.IdentifierFixture()), + unittest.WithRole(flow.RoleAccess), + )) + + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "only execution nodes identities must be provided in target list") + }) +} diff --git a/model/verification/chunkDataPackResponse.go b/model/verification/chunkDataPackResponse.go index 1aa6412ac8b..f5bb639e77e 100644 --- a/model/verification/chunkDataPackResponse.go +++ b/model/verification/chunkDataPackResponse.go @@ -1,6 +1,8 @@ 
package verification import ( + "fmt" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" ) @@ -8,7 +10,37 @@ import ( // ChunkDataPackResponse is an internal data structure in fetcher engine that is passed between the fetcher // and requester engine. It conveys requested chunk data pack as well as meta-data for fetcher engine to // process the chunk data pack. +// +//structwrite:immutable - mutations allowed only within the constructor type ChunkDataPackResponse struct { chunks.Locator Cdp *flow.ChunkDataPack } + +// UntrustedChunkDataPackResponse is an untrusted input-only representation of a ChunkDataPackResponse, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedChunkDataPackResponse should be validated and converted into +// a trusted ChunkDataPackResponse using NewChunkDataPackResponse constructor. +type UntrustedChunkDataPackResponse ChunkDataPackResponse + +// NewChunkDataPackResponse creates a new instance of ChunkDataPackResponse. +// Construction ChunkDataPackResponse allowed only within the constructor. +// +// All errors indicate a valid ChunkDataPackResponse cannot be constructed from the input. +func NewChunkDataPackResponse(untrusted UntrustedChunkDataPackResponse) (*ChunkDataPackResponse, error) { + if untrusted.Locator.EqualTo(new(chunks.Locator)) { + return nil, fmt.Errorf("locator is empty") + } + if untrusted.Cdp == nil { + return nil, fmt.Errorf("chunk data pack must not be nil") + } + return &ChunkDataPackResponse{ + Locator: untrusted.Locator, + Cdp: untrusted.Cdp, + }, nil +} diff --git a/model/verification/chunkDataPackResponse_test.go b/model/verification/chunkDataPackResponse_test.go new file mode 100644 index 00000000000..9ebc35e1d05 --- /dev/null +++ b/model/verification/chunkDataPackResponse_test.go @@ -0,0 +1,62 @@ +package verification_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/chunks" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewChunkDataPackResponse tests the NewChunkDataPackResponse constructor with valid and invalid inputs. +// +// Valid Case: +// +// 1. Valid input with non-empty locator and non-nil ChunkDataPack: +// - Should successfully construct a ChunkDataPackResponse. +// +// Invalid Cases: +// +// 2. Invalid input with empty locator: +// - Should return an error indicating the locator is empty. +// +// 3. Invalid input with nil ChunkDataPack: +// - Should return an error indicating the chunk data pack must not be nil. 
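// Editor's aside (illustrative, not part of this diff): a minimal happy-path
// construction sketch, mirroring the constructor's two checks. The variables are
// placeholders.
//
//	resp, err := verification.NewChunkDataPackResponse(verification.UntrustedChunkDataPackResponse{
//		Locator: locator, // rejected if it equals an empty chunks.Locator
//		Cdp:     cdp,     // rejected if nil
//	})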
+func TestNewChunkDataPackResponse(t *testing.T) { + t.Run("valid input with locator and chunk data pack", func(t *testing.T) { + response, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0), + Cdp: unittest.ChunkDataPackFixture(unittest.IdentifierFixture()), + }) + + require.NoError(t, err) + require.NotNil(t, response) + }) + + t.Run("invalid input with empty locator", func(t *testing.T) { + _, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: chunks.Locator{}, // empty locator + Cdp: unittest.ChunkDataPackFixture(unittest.IdentifierFixture()), + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "locator is empty") + }) + + t.Run("invalid input with nil chunk data pack", func(t *testing.T) { + _, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0), + Cdp: nil, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "chunk data pack must not be nil") + }) +} diff --git a/module/block_iterator/creator_test.go b/module/block_iterator/creator_test.go index f9cc17b0490..b6af3d46511 100644 --- a/module/block_iterator/creator_test.go +++ b/module/block_iterator/creator_test.go @@ -377,3 +377,7 @@ func (m *mockProgress) SetProcessedIndex(processed uint64) error { m.index = processed return nil } + +func (m *mockProgress) BatchSetProcessedIndex(uint64, storage.ReaderBatchWriter) error { + return fmt.Errorf("batch not supported") +} diff --git a/module/block_iterator/executor/executor.go b/module/block_iterator/executor/executor.go index 585f02b917e..bfefdba8de2 100644 --- a/module/block_iterator/executor/executor.go +++ b/module/block_iterator/executor/executor.go @@ -43,120 +43,78 @@ func IterateExecuteAndCommitInBatch( // in order to minimize the impact on the system sleepAfterEachBatchCommit time.Duration, ) error { - batch := db.NewBatch() - defer func() { - // Close last batch. - // NOTE: batch variable is reused, so it refers to the last batch when defer is executed. - batch.Close() - }() - - iteratedCountInCurrentBatch := uint(0) - - startTime := time.Now() - total := 0 - defer func() { - log.Info().Str("duration", time.Since(startTime).String()). - Int("total_block_executed", total). - Msg("block iteration and execution completed") - }() - - for { + noMoreBlocks := false + + for !noMoreBlocks { select { - // when the context is done, commit the last batch and return case <-ctx.Done(): - if iteratedCountInCurrentBatch > 0 { - // commit the last batch - err := commitAndCheckpoint(log, metrics, batch, iter) - if err != nil { - return err - } - } return nil default: } - // iterate over each block until the end - blockID, hasNext, err := iter.Next() - if err != nil { - return err - } + start := time.Now() + iteratedCountInCurrentBatch := uint(0) - if !hasNext { - if iteratedCountInCurrentBatch > 0 { - // commit last batch - err := commitAndCheckpoint(log, metrics, batch, iter) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for { + // if the batch is full, commit it and enter the outer loop to + // start a new batch + if iteratedCountInCurrentBatch >= batchSize { + return nil + } + + blockID, hasNext, err := iter.Next() if err != nil { return err } - } + if !hasNext { + // no more blocks to iterate, we are done. 
+ // update the flag and prepare to exit the loop after committing the last batch + noMoreBlocks = true + return nil + } - break - } + err = executor.ExecuteByBlockID(blockID, rw) + if err != nil { + return err + } - // execute all the data indexed by the block - err = executor.ExecuteByBlockID(blockID, batch) + iteratedCountInCurrentBatch++ + } + }) if err != nil { - return fmt.Errorf("failed to execute by block ID %v: %w", blockID, err) + return err } - iteratedCountInCurrentBatch++ - total++ - - // if batch is full, commit and sleep - if iteratedCountInCurrentBatch >= batchSize { - // commit the batch and save the progress - err := commitAndCheckpoint(log, metrics, batch, iter) - if err != nil { - return err - } - - // wait a bit to minimize the impact on the system - select { - case <-ctx.Done(): - return nil - case <-time.After(sleepAfterEachBatchCommit): - } - - // Close previous batch before creating a new batch. - batch.Close() - // create a new batch, and reset iteratedCountInCurrentBatch - batch = db.NewBatch() - iteratedCountInCurrentBatch = 0 + // save the progress of the iteration, so that it can be resumed later + _, err = iter.Checkpoint() + if err != nil { + return fmt.Errorf("failed to checkpoint iterator: %w", err) } - } - return nil -} + // report the progress of the iteration + startIndex, endIndex, nextIndex := iter.Progress() + progress := CalculateProgress(startIndex, endIndex, nextIndex) -// commitAndCheckpoint commits the batch and checkpoints the iterator -// so that the iteration progress can be resumed after restart. -func commitAndCheckpoint( - log zerolog.Logger, metrics module.ExecutionMetrics, batch storage.Batch, iter module.BlockIterator) error { - start := time.Now() + log.Info(). + Str("commit-dur", time.Since(start).String()). + Uint64("start-index", startIndex). + Uint64("end-index", endIndex). + Uint64("next-index", nextIndex). + Str("progress", fmt.Sprintf("%.2f%%", progress)). + Msg("batch committed") - err := batch.Commit() - if err != nil { - return fmt.Errorf("failed to commit batch: %w", err) - } + metrics.ExecutionLastChunkDataPackPrunedHeight(nextIndex - 1) - _, err = iter.Checkpoint() - if err != nil { - return fmt.Errorf("failed to checkpoint iterator: %w", err) + // sleep after each batch commit to minimize the impact on the system + select { + case <-ctx.Done(): + return nil + case <-time.After(sleepAfterEachBatchCommit): + // continue to next iteration + } } - startIndex, endIndex, nextIndex := iter.Progress() - progress := CalculateProgress(startIndex, endIndex, nextIndex) - - log.Info(). - Str("commit-dur", time.Since(start).String()). - Uint64("start-index", startIndex). - Uint64("end-index", endIndex). - Uint64("next-index", nextIndex). - Str("progress", fmt.Sprintf("%.2f%%", progress)). 
- Msg("batch committed") - - metrics.ExecutionLastChunkDataPackPrunedHeight(nextIndex - 1) - return nil } diff --git a/module/block_iterator/executor/executor_test.go b/module/block_iterator/executor/executor_test.go index 882a2aa96d7..da4716ccf87 100644 --- a/module/block_iterator/executor/executor_test.go +++ b/module/block_iterator/executor/executor_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" diff --git a/module/block_iterator/iterator_test.go b/module/block_iterator/iterator_test.go index 7d8ca35446b..9b8b8bb1bb0 100644 --- a/module/block_iterator/iterator_test.go +++ b/module/block_iterator/iterator_test.go @@ -4,19 +4,20 @@ import ( "fmt" "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" - storagebadger "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" ) func TestIterateHeight(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { // create blocks with siblings b1 := &flow.Header{Height: 1} b2 := &flow.Header{Height: 2} @@ -25,7 +26,13 @@ func TestIterateHeight(t *testing.T) { // index height for _, b := range bs { - require.NoError(t, db.Update(operation.IndexBlockHeight(b.Height, b.ID()))) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockFinalizeBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, b.Height, b.ID()) + })) + + lctx.Release() } progress := &saveNextHeight{} @@ -33,7 +40,7 @@ func TestIterateHeight(t *testing.T) { // create iterator // b0 is the root block, iterate from b1 to b3 iterRange := module.IteratorRange{Start: b1.Height, End: b3.Height} - headers := storagebadger.NewHeaders(&metrics.NoopCollector{}, db) + headers := store.NewHeaders(&metrics.NoopCollector{}, db) getBlockIDByIndex := func(height uint64) (flow.Identifier, bool, error) { blockID, err := headers.BlockIDByHeight(height) if err != nil { diff --git a/module/block_iterator/state_test.go b/module/block_iterator/state_test.go index 6927e9e67b8..7617f80fe62 100644 --- a/module/block_iterator/state_test.go +++ b/module/block_iterator/state_test.go @@ -3,7 +3,7 @@ package block_iterator import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/storage/operation/pebbleimpl" diff --git a/module/builder/collection/builder.go b/module/builder/collection/builder.go index 224f5234336..79ab9ea04cf 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/cluster" @@ -19,8 +19,8 @@ import ( "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/state/protocol" 
"github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" "github.com/onflow/flow-go/utils/logging" ) @@ -31,7 +31,8 @@ import ( // HotStuff event loop is the only consumer of this interface and is single // threaded, this is OK. type Builder struct { - db *badger.DB + db storage.DB + lockManager lockctx.Manager mainHeaders storage.Headers clusterHeaders storage.Headers protoState protocol.State @@ -48,8 +49,9 @@ type Builder struct { } func NewBuilder( - db *badger.DB, + db storage.DB, tracer module.Tracer, + lockManager lockctx.Manager, protoState protocol.State, clusterState clusterstate.State, mainHeaders storage.Headers, @@ -63,6 +65,7 @@ func NewBuilder( b := Builder{ db: db, tracer: tracer, + lockManager: lockManager, protoState: protoState, clusterState: clusterState, mainHeaders: mainHeaders, @@ -163,11 +166,18 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er return nil, fmt.Errorf("could not populate un-finalized ancestry lookout (parent_id=%x): %w", parentID, err) } + lctx := b.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + if err != nil { + return nil, err + } + // STEP 1b: create a lookup of all transactions previously included in // the finalized collections. Any transactions already included in finalized // collections can be removed from the mempool. span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnFinalizedLookup) - err = b.populateFinalizedAncestryLookup(buildCtx) + err = b.populateFinalizedAncestryLookup(lctx, buildCtx) span.End() if err != nil { return nil, fmt.Errorf("could not populate finalized ancestry lookup: %w", err) @@ -198,7 +208,10 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er // STEP 4: insert the cluster block to the database. 
span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnDBInsert) - err = operation.RetryOnConflict(b.db.Update, procedure.InsertClusterBlock(&proposal)) + + err = b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &proposal) + }) span.End() if err != nil { return nil, fmt.Errorf("could not insert built block: %w", err) @@ -245,37 +258,34 @@ func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildCon return ctx, nil } - // otherwise, attempt to read them from storage - err = b.db.View(func(btx *badger.Txn) error { - var refEpochFinalHeight uint64 - var refEpochFinalID flow.Identifier + r := b.db.Reader() - err = operation.RetrieveEpochLastHeight(b.clusterEpoch, &refEpochFinalHeight)(btx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return nil - } - return fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) - } - err = operation.LookupBlockHeight(refEpochFinalHeight, &refEpochFinalID)(btx) - if err != nil { - // if we are able to retrieve the epoch's final height, the block must be finalized - // therefore failing to look up its height here is an unexpected error - return irrecoverable.NewExceptionf("could not retrieve ID of finalized final block of operating epoch: %w", err) - } + var refEpochFinalHeight uint64 + var refEpochFinalID flow.Identifier - // cache the values - b.epochFinalHeight = &refEpochFinalHeight - b.epochFinalID = &refEpochFinalID - // store the values in the build context - ctx.refEpochFinalID = b.epochFinalID - ctx.refEpochFinalHeight = b.epochFinalHeight + err = operation.RetrieveEpochLastHeight(r, b.clusterEpoch, &refEpochFinalHeight) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return ctx, nil + } + return nil, fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) + } - return nil - }) + // this does not require a lock, as block ID of an height does not change + err = operation.LookupBlockHeight(r, refEpochFinalHeight, &refEpochFinalID) if err != nil { - return nil, fmt.Errorf("could not get block build context: %w", err) + // if we are able to retrieve the epoch's final height, the block must be finalized + // therefore failing to look up its height here is an unexpected error + return nil, irrecoverable.NewExceptionf("could not retrieve ID of finalized final block of operating epoch: %w", err) } + + // cache the values + b.epochFinalHeight = &refEpochFinalHeight + b.epochFinalID = &refEpochFinalID + // store the values in the build context + ctx.refEpochFinalID = b.epochFinalID + ctx.refEpochFinalHeight = b.epochFinalHeight + return ctx, nil } @@ -308,7 +318,7 @@ func (b *Builder) populateUnfinalizedAncestryLookup(ctx *blockBuildContext) erro // The traversal is structured so that we check every collection whose reference // block height translates to a possible constituent transaction which could also // appear in the collection we are building. 
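Both the builder and the tests in this file now follow the same locking discipline before writing cluster blocks: create a lock context, acquire the named lock, pass the context as proof into the storage procedure, and release the lock when done. Restated as a small standalone helper, this is only a sketch that reuses the lockctx and storage identifiers appearing in this diff (NewContext, AcquireLock, Release, and a procedure that takes the lock proof):

```go
// insertClusterBlockWithLock acquires the cluster-block lock, performs the
// write inside a single batch, and releases the lock via defer.
func insertClusterBlockWithLock(
	db storage.DB,
	lockManager lockctx.Manager,
	proposal *cluster.Block,
) error {
	lctx := lockManager.NewContext()
	defer lctx.Release()

	if err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock); err != nil {
		return fmt.Errorf("could not acquire cluster block lock: %w", err)
	}

	// the procedure requires the lock proof, so it cannot be called without
	// first acquiring the corresponding lock
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return procedure.InsertClusterBlock(lctx, rw, proposal)
	})
}
```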
-func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error { +func (b *Builder) populateFinalizedAncestryLookup(lctx lockctx.Proof, ctx *blockBuildContext) error { minRefHeight := ctx.lowestPossibleReferenceBlockHeight() maxRefHeight := ctx.highestPossibleReferenceBlockHeight() lookup := ctx.lookup @@ -336,7 +346,7 @@ func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error // the finalized cluster blocks which could possibly contain any conflicting transactions var clusterBlockIDs []flow.Identifier start, end := findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight) - err := b.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs)) + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, b.db.Reader(), start, end, &clusterBlockIDs) if err != nil { return fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err) } diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go index a9b387b407d..b16f719299b 100644 --- a/module/builder/collection/builder_test.go +++ b/module/builder/collection/builder_test.go @@ -3,11 +3,13 @@ package collection_test import ( "context" "errors" + "fmt" "math/rand" "os" "testing" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,9 +32,10 @@ import ( "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -41,8 +44,10 @@ var noopSigner = func(*flow.Header) error { return nil } type BuilderSuite struct { suite.Suite - db *badger.DB - dbdir string + db storage.DB + badgerDB *badger.DB + dbdir string + lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID @@ -63,6 +68,8 @@ type BuilderSuite struct { // runs before each test runs func (suite *BuilderSuite) SetupTest() { + fmt.Println("SetupTest>>>>") + suite.lockManager = storage.NewTestingLockManager() var err error suite.genesis = model.Genesis() @@ -71,18 +78,19 @@ func (suite *BuilderSuite) SetupTest() { suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.badgerDB = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.db = badgerimpl.ToDB(suite.badgerDB) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := bstorage.InitAll(metrics, suite.db) + all := store.InitAll(metrics, suite.db) consumer := events.NewNoop() suite.headers = all.Headers suite.blocks = all.Blocks - suite.payloads = bstorage.NewClusterPayloads(metrics, suite.db) + suite.payloads = store.NewClusterPayloads(metrics, suite.db) // just bootstrap with a genesis block, we'll use this as reference root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) @@ -91,13 +99,16 @@ func 
(suite *BuilderSuite) SetupTest() { seal.ResultID = result.ID() safetyParams, err := protocol.DefaultEpochSafetyParams(root.Header.ChainID) require.NoError(suite.T(), err) + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents( + result.ServiceEvents[0].Event.(*flow.EpochSetup), + result.ServiceEvents[1].Event.(*flow.EpochCommit), + ) + require.NoError(suite.T(), err) rootProtocolState, err := kvstore.NewDefaultKVStore( safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, - inmem.EpochProtocolStateFromServiceEvents( - result.ServiceEvents[0].Event.(*flow.EpochSetup), - result.ServiceEvents[1].Event.(*flow.EpochCommit), - ).ID()) + minEpochStateEntry.ID(), + ) require.NoError(suite.T(), err) root.Payload.ProtocolStateID = rootProtocolState.ID() rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID()))) @@ -106,31 +117,35 @@ func (suite *BuilderSuite) SetupTest() { require.NoError(suite.T(), err) clusterQC := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(suite.genesis.ID())) + minEpochStateEntry, err = inmem.EpochProtocolStateFromServiceEvents( + result.ServiceEvents[0].Event.(*flow.EpochSetup), + result.ServiceEvents[1].Event.(*flow.EpochCommit), + ) + require.NoError(suite.T(), err) rootProtocolState, err = kvstore.NewDefaultKVStore( safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, - inmem.EpochProtocolStateFromServiceEvents( - result.ServiceEvents[0].Event.(*flow.EpochSetup), - result.ServiceEvents[1].Event.(*flow.EpochCommit), - ).ID()) + minEpochStateEntry.ID(), + ) require.NoError(suite.T(), err) root.Payload.ProtocolStateID = rootProtocolState.ID() clusterStateRoot, err := clusterkv.NewStateRoot(suite.genesis, clusterQC, suite.epochCounter) suite.Require().NoError(err) - clusterState, err := clusterkv.Bootstrap(suite.db, clusterStateRoot) + clusterState, err := clusterkv.Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Require().NoError(err) - suite.state, err = clusterkv.NewMutableState(clusterState, tracer, suite.headers, suite.payloads) + suite.state, err = clusterkv.NewMutableState(clusterState, suite.lockManager, tracer, suite.headers, suite.payloads) suite.Require().NoError(err) state, err := pbadger.Bootstrap( metrics, suite.db, + suite.lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, all.EpochProtocolStateEntries, all.ProtocolKVStore, @@ -161,35 +176,45 @@ func (suite *BuilderSuite) SetupTest() { suite.Assert().True(added) } - suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) + suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) } // runs after each test finishes func (suite *BuilderSuite) TearDownTest() { - err := suite.db.Close() + err := suite.badgerDB.Close() suite.Assert().NoError(err) err = os.RemoveAll(suite.dbdir) suite.Assert().NoError(err) } func (suite *BuilderSuite) InsertBlock(block model.Block) { - err := suite.db.Update(procedure.InsertClusterBlock(&block)) + lctx := suite.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + 
suite.Assert().NoError(err) + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &block) + }) suite.Assert().NoError(err) } func (suite *BuilderSuite) FinalizeBlock(block model.Block) { - err := suite.db.Update(func(tx *badger.Txn) error { + lctx := suite.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + suite.Assert().NoError(err) + + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { var refBlock flow.Header - err := operation.RetrieveHeader(block.Payload.ReferenceBlockID, &refBlock)(tx) + err := operation.RetrieveHeader(rw.GlobalReader(), block.Payload.ReferenceBlockID, &refBlock) if err != nil { return err } - err = procedure.FinalizeClusterBlock(block.ID())(tx) + err = procedure.FinalizeClusterBlock(lctx, rw, block.ID()) if err != nil { return err } - err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, block.ID())(tx) - return err + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, block.ID()) }) suite.Assert().NoError(err) } @@ -251,7 +276,7 @@ func (suite *BuilderSuite) TestBuildOn_Success() { // should be able to retrieve built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) builtCollection := built.Payload.Collection @@ -314,7 +339,7 @@ func (suite *BuilderSuite) TestBuildOn_WithUnknownReferenceBlock() { // should be able to retrieve built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) builtCollection := built.Payload.Collection @@ -354,7 +379,7 @@ func (suite *BuilderSuite) TestBuildOn_WithUnfinalizedReferenceBlock() { // should be able to retrieve built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) builtCollection := built.Payload.Collection @@ -401,7 +426,7 @@ func (suite *BuilderSuite) TestBuildOn_WithOrphanedReferenceBlock() { // should be able to retrieve built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) builtCollection := built.Payload.Collection @@ -444,7 +469,7 @@ func (suite *BuilderSuite) TestBuildOn_WithForks() { // should be able to retrieve built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) assert.NoError(t, err) builtCollection := built.Payload.Collection @@ -487,7 +512,7 @@ func (suite *BuilderSuite) TestBuildOn_ConflictingFinalizedBlock() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) assert.NoError(t, err) builtCollection := built.Payload.Collection @@ -536,7 +561,7 @@ func (suite *BuilderSuite) 
TestBuildOn_ConflictingInvalidatedForks() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) assert.NoError(t, err) builtCollection := built.Payload.Collection @@ -551,7 +576,7 @@ func (suite *BuilderSuite) TestBuildOn_LargeHistory() { // use a mempool with 2000 transactions, one per block suite.pool = herocache.NewTransactions(2000, unittest.Logger(), metrics.NewNoopCollector()) - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10000)) + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10000)) // get a valid reference block ID final, err := suite.protoState.Final().Head() @@ -585,7 +610,7 @@ func (suite *BuilderSuite) TestBuildOn_LargeHistory() { // conflicting fork, build on the parent of the head parent := head if conflicting { - err = suite.db.View(procedure.RetrieveClusterBlock(parent.Header.ParentID, &parent)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), parent.Header.ParentID, &parent) assert.NoError(t, err) // add the transaction to the invalidated list invalidatedTxIds = append(invalidatedTxIds, tx.ID()) @@ -620,7 +645,7 @@ func (suite *BuilderSuite) TestBuildOn_LargeHistory() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) require.NoError(t, err) builtCollection := built.Payload.Collection @@ -631,7 +656,7 @@ func (suite *BuilderSuite) TestBuildOn_LargeHistory() { func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() { // set the max collection size to 1 - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(1)) + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(1)) // build a block header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter, noopSigner) @@ -639,7 +664,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Require().NoError(err) builtCollection := built.Payload.Collection @@ -649,7 +674,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() { func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() { // set the max collection byte size to 400 (each tx is about 150 bytes) - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionByteSize(400)) + 
suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionByteSize(400)) // build a block header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter, noopSigner) @@ -657,7 +682,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Require().NoError(err) builtCollection := built.Payload.Collection @@ -667,7 +692,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() { func (suite *BuilderSuite) TestBuildOn_MaxCollectionTotalGas() { // set the max gas to 20,000 - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionTotalGas(20000)) + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionTotalGas(20000)) // build a block header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter, noopSigner) @@ -675,7 +700,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionTotalGas() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Require().NoError(err) builtCollection := built.Payload.Collection @@ -705,7 +730,7 @@ func (suite *BuilderSuite) TestBuildOn_ExpiredTransaction() { // reset the pool and builder suite.pool = herocache.NewTransactions(10, unittest.Logger(), metrics.NewNoopCollector()) - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) // insert a transaction referring genesis (now expired) tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { @@ -732,7 +757,7 @@ func (suite *BuilderSuite) TestBuildOn_ExpiredTransaction() { // retrieve the built block from storage var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Require().NoError(err) builtCollection := built.Payload.Collection @@ -747,13 +772,13 @@ func (suite *BuilderSuite) TestBuildOn_EmptyMempool() { // start with an empty mempool suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, 
suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter, noopSigner) suite.Require().NoError(err) var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Require().NoError(err) // should reference a valid reference block @@ -774,7 +799,7 @@ func (suite *BuilderSuite) TestBuildOn_NoRateLimiting() { suite.ClearPool() // create builder with no rate limit and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(0), ) @@ -798,7 +823,7 @@ func (suite *BuilderSuite) TestBuildOn_NoRateLimiting() { // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) } @@ -815,7 +840,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitNonPayer() { suite.ClearPool() // create builder with 5 tx/payer and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), ) @@ -845,7 +870,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitNonPayer() { // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) } @@ -859,7 +884,7 @@ func (suite *BuilderSuite) TestBuildOn_HighRateLimit() { suite.ClearPool() // create builder with 5 tx/payer and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), ) @@ -883,7 +908,7 @@ func (suite *BuilderSuite) TestBuildOn_HighRateLimit() { // each collection should be half-full with 5 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) 
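A large share of the remaining churn in this test file is mechanical: reads that previously ran inside a Badger view transaction now call the procedure directly with a `storage.Reader`. The before/after shape, condensed into two illustrative helpers (note that `procedure` refers to the old `storage/badger/procedure` package in the first and the new `storage/procedure` package in the second, so the two would not coexist in one file):

```go
// Before: the read runs inside a Badger view transaction.
func readBuiltBlockBadger(bdb *badger.DB, blockID flow.Identifier) (model.Block, error) {
	var built model.Block
	err := bdb.View(procedure.RetrieveClusterBlock(blockID, &built))
	return built, err
}

// After: the procedure takes an explicit reader from the storage abstraction.
func readBuiltBlock(db storage.DB, blockID flow.Identifier) (model.Block, error) {
	var built model.Block
	err := procedure.RetrieveClusterBlock(db.Reader(), blockID, &built)
	return built, err
}
```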
suite.Assert().Len(built.Payload.Collection.Transactions, 5) } @@ -897,7 +922,7 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { suite.ClearPool() // create builder with .5 tx/payer and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(.5), ) @@ -922,7 +947,7 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { // collections should either be empty or have 1 transaction var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) if i%2 == 0 { suite.Assert().Len(built.Payload.Collection.Transactions, 1) @@ -939,7 +964,7 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { // create builder with 5 tx/payer and max 10 tx/collection // configure an unlimited payer payer := unittest.RandomAddressFixture() - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), builder.WithUnlimitedPayers(payer), @@ -963,7 +988,7 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) @@ -980,7 +1005,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitDryRun() { // create builder with 5 tx/payer and max 10 tx/collection // configure an unlimited payer payer := unittest.RandomAddressFixture() - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), builder.WithRateLimitDryRun(true), @@ -1004,7 +1029,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitDryRun() { // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) } @@ -1049,13 +1074,15 @@ func benchmarkBuildOn(b *testing.B, size int) { suite.genesis = model.Genesis() suite.chainID = 
suite.genesis.Header.ChainID + suite.lockManager = storage.NewTestingLockManager() suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) suite.dbdir = unittest.TempDir(b) - suite.db = unittest.BadgerDB(b, suite.dbdir) + suite.badgerDB = unittest.BadgerDB(b, suite.dbdir) + suite.db = badgerimpl.ToDB(suite.badgerDB) defer func() { - err = suite.db.Close() + err = suite.badgerDB.Close() assert.NoError(b, err) err = os.RemoveAll(suite.dbdir) assert.NoError(b, err) @@ -1063,18 +1090,18 @@ func benchmarkBuildOn(b *testing.B, size int) { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := bstorage.InitAll(metrics, suite.db) + all := store.InitAll(metrics, suite.db) suite.headers = all.Headers suite.blocks = all.Blocks - suite.payloads = bstorage.NewClusterPayloads(metrics, suite.db) + suite.payloads = store.NewClusterPayloads(metrics, suite.db) qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(suite.genesis.ID())) stateRoot, err := clusterkv.NewStateRoot(suite.genesis, qc, suite.epochCounter) - state, err := clusterkv.Bootstrap(suite.db, stateRoot) + state, err := clusterkv.Bootstrap(suite.db, suite.lockManager, stateRoot) assert.NoError(b, err) - suite.state, err = clusterkv.NewMutableState(state, tracer, suite.headers, suite.payloads) + suite.state, err = clusterkv.NewMutableState(state, suite.lockManager, tracer, suite.headers, suite.payloads) assert.NoError(b, err) // add some transactions to transaction pool @@ -1085,19 +1112,29 @@ func benchmarkBuildOn(b *testing.B, size int) { } // create the builder - suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) + suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.lockManager, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) } // create a block history to test performance against final := suite.genesis for i := 0; i < size; i++ { block := unittest.ClusterBlockWithParent(final) - err := suite.db.Update(procedure.InsertClusterBlock(&block)) + lctx := suite.lockManager.NewContext() + require.NoError(b, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &block) + }) + lctx.Release() require.NoError(b, err) // finalize the block 80% of the time, resulting in a fork-rate of 20% if rand.Intn(100) < 80 { - err = suite.db.Update(procedure.FinalizeClusterBlock(block.ID())) + lctx := suite.lockManager.NewContext() + defer lctx.Release() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.FinalizeClusterBlock(lctx, rw, block.ID()) + }) require.NoError(b, err) final = &block } diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index 305e4ab6da0..5f7e92d2d52 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/deferred" ) // Builder is the builder for consensus block payloads. 
Upon providing a payload @@ -414,10 +415,13 @@ func (b *Builder) getInsertableSeals(parentID flow.Identifier) ([]*flow.Seal, er // re-assemble the IncorporatedResult because we need its ID to // check if it is in the seal mempool. - incorporatedResult := flow.NewIncorporatedResult( - blockID, - result, - ) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block %x: %w", blockID, err) + } // enforce condition (0): candidate seals are only constructed once sufficient // approvals have been collected. Hence, any incorporated result for which we @@ -638,7 +642,8 @@ func (b *Builder) createProposal(parentID flow.Identifier, // Evolve the Protocol State starting from the parent block's state. Information that may change the state is: // the candidate block's view and Service Events from execution results sealed in the candidate block. - protocolStateID, _, err := b.mutableProtocolState.EvolveState(header.ParentID, header.View, seals) + deferredBlockPersist := deferred.NewDeferredBlockPersist() + protocolStateID, err := b.mutableProtocolState.EvolveState(deferredBlockPersist, header.ParentID, header.View, seals) if err != nil { return nil, fmt.Errorf("evolving protocol state failed: %w", err) } diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index 77db1050790..f20b7c756af 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -21,10 +21,10 @@ import ( "github.com/onflow/flow-go/module/trace" realproto "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" - storerr "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" - storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -73,12 +73,12 @@ type BuilderSuite struct { // mocked dependencies state *protocol.ParticipantState - headerDB *storage.Headers - sealDB *storage.Seals - indexDB *storage.Index - blockDB *storage.Blocks - resultDB *storage.ExecutionResults - receiptsDB *storage.ExecutionReceipts + headerDB *storagemock.Headers + sealDB *storagemock.Seals + indexDB *storagemock.Index + blockDB *storagemock.Blocks + resultDB *storagemock.ExecutionResults + receiptsDB *storagemock.ExecutionReceipts stateMutator *protocol.MutableProtocolState guarPool *mempool.Guarantees @@ -178,7 +178,6 @@ func (bs *BuilderSuite) chainSeal(incorporatedResult *flow.IncorporatedResult) { // For the verifiers to start checking a result R, they need a source of randomness for the block _incorporating_ // result R. The result for block [A3] is incorporated in [parent], which does _not_ have a child yet. 
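The switch to `flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{...})` above follows the same constructor-validation pattern exercised by the `NewChunkDataPackResponse` tests at the start of this section: callers pass an untrusted value and only receive the trusted type once validation succeeds. A generic sketch of the shape, using hypothetical `Widget` types and an illustrative sentinel error:

```go
// ErrInvalidWidget is returned when untrusted input fails validation.
var ErrInvalidWidget = errors.New("invalid widget")

// UntrustedWidget carries raw, unvalidated input (e.g. decoded from the network).
type UntrustedWidget struct {
	OwnerID flow.Identifier
	Payload []byte
}

// Widget is only ever constructed through NewWidget, so holding a Widget
// implies the validation below has already passed.
type Widget UntrustedWidget

// NewWidget validates the untrusted input and returns the trusted type.
// Expected errors during normal operations:
//   - ErrInvalidWidget: if a required field is missing or empty
func NewWidget(u UntrustedWidget) (*Widget, error) {
	if u.OwnerID == flow.ZeroID {
		return nil, fmt.Errorf("owner ID must not be zero: %w", ErrInvalidWidget)
	}
	if len(u.Payload) == 0 {
		return nil, fmt.Errorf("payload must not be empty: %w", ErrInvalidWidget)
	}
	w := Widget(u)
	return &w, nil
}
```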
func (bs *BuilderSuite) SetupTest() { - // set up no-op dependencies noopMetrics := metrics.NewNoopCollector() noopTracer := trace.NewNoopTracer() @@ -250,22 +249,23 @@ func (bs *BuilderSuite) SetupTest() { // set up temporary database for tests bs.db, bs.dir = unittest.TempBadgerDB(bs.T()) - - err := bs.db.Update(operation.InsertFinalizedHeight(final.Header.Height)) - bs.Require().NoError(err) - err = bs.db.Update(operation.IndexBlockHeight(final.Header.Height, bs.finalID)) - bs.Require().NoError(err) - - err = bs.db.Update(operation.InsertRootHeight(13)) - bs.Require().NoError(err) - - err = bs.db.Update(operation.InsertSealedHeight(first.Header.Height)) - bs.Require().NoError(err) - err = bs.db.Update(operation.IndexBlockHeight(first.Header.Height, first.ID())) - bs.Require().NoError(err) + lockManager := storage.NewTestingLockManager() + + lctx := lockManager.NewContext() + require.NoError(bs.T(), lctx.AcquireLock(storage.LockFinalizeBlock)) + defer lctx.Release() + + // insert finalized height and root height + db := badgerimpl.ToDB(bs.db) + require.NoError(bs.T(), db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + require.NoError(bs.T(), operation.InsertRootHeight(rw.Writer(), 13)) + require.NoError(bs.T(), operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Header.Height)) + require.NoError(bs.T(), operation.IndexFinalizedBlockByHeight(lctx, rw, final.Header.Height, bs.finalID)) + require.NoError(bs.T(), operation.UpsertSealedHeight(lctx, rw.Writer(), first.Header.Height)) + return nil + })) bs.sentinel = 1337 - bs.setter = func(header *flow.Header) error { header.View = 1337 return nil @@ -293,10 +293,10 @@ func (bs *BuilderSuite) SetupTest() { bs.state.On("Params").Return(params) // set up storage mocks for tests - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.sealDB.On("HighestInFork", mock.Anything).Return(bs.lastSeal, nil) - bs.headerDB = &storage.Headers{} + bs.headerDB = &storagemock.Headers{} bs.headerDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Header { return bs.headers[blockID] @@ -304,13 +304,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.headers[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.indexDB = &storage.Index{} + bs.indexDB = &storagemock.Index{} bs.indexDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Index { return bs.index[blockID] @@ -318,13 +318,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.index[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.blockDB = &storage.Blocks{} + bs.blockDB = &storagemock.Blocks{} bs.blockDB.On("ByID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Block { return bs.blocks[blockID] @@ -332,13 +332,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.blocks[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.resultDB = &storage.ExecutionResults{} + bs.resultDB = &storagemock.ExecutionResults{} bs.resultDB.On("ByID", mock.Anything).Return( func(resultID flow.Identifier) *flow.ExecutionResult { return bs.resultByID[resultID] @@ -346,13 +346,13 @@ func (bs *BuilderSuite) SetupTest() { func(resultID flow.Identifier) error { _, exists := bs.resultByID[resultID] if !exists { - return storerr.ErrNotFound + return 
storage.ErrNotFound } return nil }, ) - bs.receiptsDB = &storage.ExecutionReceipts{} + bs.receiptsDB = &storagemock.ExecutionReceipts{} bs.receiptsDB.On("ByID", mock.Anything).Return( func(receiptID flow.Identifier) *flow.ExecutionReceipt { return bs.receiptsByID[receiptID] @@ -360,7 +360,7 @@ func (bs *BuilderSuite) SetupTest() { func(receiptID flow.Identifier) error { _, exists := bs.receiptsByID[receiptID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, @@ -372,7 +372,7 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.receiptsByBlockID[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, @@ -422,9 +422,10 @@ func (bs *BuilderSuite) SetupTest() { // setup mock state mutator, we don't need a real once since we are using mocked participant state. bs.stateMutator = protocol.NewMutableProtocolState(bs.T()) - bs.stateMutator.On("EvolveState", mock.Anything, mock.Anything, mock.Anything).Return(unittest.IdentifierFixture(), transaction.NewDeferredBlockPersist(), nil).Maybe() + bs.stateMutator.On("EvolveState", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(unittest.IdentifierFixture(), nil).Maybe() // initialize the builder + var err error bs.build, err = NewBuilder( noopMetrics, bs.state, @@ -694,7 +695,7 @@ func (bs *BuilderSuite) TestPayloadSeals_EnforceGap() { b1seal := storeSealForIncorporatedResult(resultB1, b4.ID(), bs.pendingSeals) // mock for seals storage layer: - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB bs.T().Run("Build on top of B4 and check that no seals are included", func(t *testing.T) { @@ -733,7 +734,7 @@ func (bs *BuilderSuite) TestPayloadSeals_Duplicate() { // Pretend that the first n blocks are already sealed n := 4 lastSeal := bs.chain[n-1] - mockSealDB := &storage.Seals{} + mockSealDB := &storagemock.Seals{} mockSealDB.On("HighestInFork", mock.Anything).Return(lastSeal, nil) bs.build.seals = mockSealDB @@ -850,7 +851,7 @@ func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() { for _, b := range blocks { bs.storeBlock(b) } - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB bs.sealDB.On("HighestInFork", mock.Anything).Return(sealF, nil) bs.resultByID[sealedResult.ID()] = &sealedResult @@ -899,7 +900,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_TraverseExecutionTreeFromLastSealedR f2 := bs.blocks[bs.finalizedBlockIDs[2]] f2eal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[f2.ID()])) f4Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[bs.finalID])) - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB // reset receipts mempool to verify calls made by Builder @@ -963,7 +964,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_IncludeOnlyReceiptsForCurrentFork() // set last sealed blocks: b1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[b1.ID()])) - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.sealDB.On("HighestInFork", b5.ID()).Return(b1Seal, nil) bs.build.seals = bs.sealDB diff --git a/module/chainsync/core.go b/module/chainsync/core.go index c493df79cb9..48e73494a8f 100644 --- a/module/chainsync/core.go +++ b/module/chainsync/core.go @@ -79,36 +79,45 @@ func New(log zerolog.Logger, config Config, metrics module.ChainSyncMetrics, cha // true if the block should be processed by the compliance layer 
and false // if it should be ignored. func (c *Core) HandleBlock(header *flow.Header) bool { - log := c.log - if c.log.Debug().Enabled() { - log = c.log.With().Str("block_id", header.ID().String()).Uint64("block_height", header.Height).Logger() - } - c.mu.Lock() - defer c.mu.Unlock() + // Always accept new blocks. Since we forcibly send the block requests, the book keeping below + // will cause the block to ignored. - status := c.getRequestStatus(header.Height, header.ID()) - - // if we never asked for this block, discard it - if !status.WasQueued() { - log.Debug().Msg("discarding not queued block") - return false - } - // if we have already received this block, exit - if status.WasReceived() { - log.Debug().Msg("discarding not received block") - return false - } - - // this is a new block, remember that we've seen it - status.Header = header - status.Received = time.Now() - - // track it by ID and by height so we don't accidentally request it again - c.blockIDs[header.ID()] = status - c.heights[header.Height] = status - - log.Debug().Msg("handled block") + c.log.Info(). + Str("block_id", header.ID().String()). + Uint64("block_height", header.Height). + Msg("handled block") return true + + // log := c.log + // if c.log.Debug().Enabled() { + // log = c.log.With().Str("block_id", header.ID().String()).Uint64("block_height", header.Height).Logger() + // } + // c.mu.Lock() + // defer c.mu.Unlock() + + // status := c.getRequestStatus(header.Height, header.ID()) + + // // if we never asked for this block, discard it + // if !status.WasQueued() { + // log.Debug().Msg("discarding not queued block") + // return false + // } + // // if we have already received this block, exit + // if status.WasReceived() { + // log.Debug().Msg("discarding not received block") + // return false + // } + + // // this is a new block, remember that we've seen it + // status.Header = header + // status.Received = time.Now() + + // // track it by ID and by height so we don't accidentally request it again + // c.blockIDs[header.ID()] = status + // c.heights[header.Height] = status + + // log.Debug().Msg("handled block") + // return true } // HandleHeight handles receiving a new highest finalized height from another node. diff --git a/module/dkg/recovery_test.go b/module/dkg/recovery_test.go index ee09565e5c9..aadb6d975e1 100644 --- a/module/dkg/recovery_test.go +++ b/module/dkg/recovery_test.go @@ -153,7 +153,7 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochRetrieveMyBea // In case like this there is no need for recovery and we should exit early. 
func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_KeyAlreadyRecovered() { s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return( - unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength), true, nil).Once() + unittest.PrivateKeyFixture(crypto.BLSBLS12381), true, nil).Once() recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) require.NoError(s.T(), err) @@ -219,7 +219,7 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NoSafeMyBeaconPrivateK func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochDKGException() { s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() exception := errors.New("exception") @@ -240,7 +240,7 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochDKGException( func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochKeyShareException() { s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() exception := errors.New("exception") @@ -265,7 +265,7 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NodeIsNotPartOfNextEpo dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() // node is not part of the DKG for the next epoch dkg := mockprotocol.NewDKG(s.T()) @@ -283,11 +283,11 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NodeIsNotPartOfNextEpo dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() // DKG doesn't contain a public key for our private key. 
dkg := mockprotocol.NewDKG(s.T()) - randomPubKey := unittest.PublicKeysFixture(1, crypto.ECDSAP256)[0] + randomPubKey := unittest.PublicKeysFixture(1, crypto.BLSBLS12381)[0] dkg.On("KeyShare", s.local.NodeID()).Return(randomPubKey, nil).Once() s.nextEpoch.On("DKG").Return(dkg, nil).Once() @@ -308,7 +308,7 @@ func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NodeIsNotPartOfNextEpo func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_RecoverKey() { performTest := func(dkgState *mockstorage.EpochRecoveryMyBeaconKey) { // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() // node is part of the DKG for the next epoch dkg := mockprotocol.NewDKG(s.T()) @@ -364,7 +364,7 @@ func (s *BeaconKeyRecoverySuite) TestEpochFallbackModeExited() { s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() // have a safe key for the current epoch - myBeaconKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() // node is part of the DKG for the next epoch dkg := mockprotocol.NewDKG(s.T()) diff --git a/module/epochs/machine_account_test.go b/module/epochs/machine_account_test.go index 0eb5c593bcf..e80af3e31ce 100644 --- a/module/epochs/machine_account_test.go +++ b/module/epochs/machine_account_test.go @@ -36,7 +36,7 @@ func TestMachineAccountChecking(t *testing.T) { }) t.Run("inconsistent key", func(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) - randomKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + randomKey := unittest.PrivateKeyFixture(crypto.ECDSAP256) remote.Keys[0].PublicKey = randomKey.PublicKey() err := CheckMachineAccountInfo(zerolog.Nop(), conf, flow.RoleConsensus, local, remote) require.Error(t, err) @@ -154,7 +154,7 @@ func TestMachineAccountChecking(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) // non-standard sig algo - sk := unittest.PrivateKeyFixture(crypto.ECDSASecp256k1, unittest.DefaultSeedFixtureLength) + sk := unittest.PrivateKeyFixture(crypto.ECDSASecp256k1) local.EncodedPrivateKey = sk.Encode() local.SigningAlgorithm = crypto.ECDSASecp256k1 // consistent between local/remote diff --git a/module/execution/scripts_test.go b/module/execution/scripts_test.go index 4f86e3fe2cf..2a3599a3fee 100644 --- a/module/execution/scripts_test.go +++ b/module/execution/scripts_test.go @@ -61,7 +61,7 @@ func (s *scriptTestSuite) TestScriptExecution() { s.Run("Get Block", func() { code := []byte(fmt.Sprintf(`access(all) fun main(): UInt64 { getBlock(at: %d)! 
- return getCurrentBlock().height + return getCurrentBlock().height }`, s.height)) result, err := s.scripts.ExecuteAtBlockHeight(context.Background(), code, nil, s.height) @@ -153,6 +153,7 @@ func (s *scriptTestSuite) TestGetAccountKeys() { } func (s *scriptTestSuite) SetupTest() { + lockManager := storage.NewTestingLockManager() logger := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) entropyProvider := testutil.ProtocolStateWithSourceFixture(nil) blockchain := unittest.BlockchainFixture(10) @@ -190,6 +191,7 @@ func (s *scriptTestSuite) SetupTest() { flow.Testnet.Chain(), derivedChainData, nil, + lockManager, ) s.Require().NoError(err) diff --git a/module/executiondatasync/optimistic_sync/core.go b/module/executiondatasync/optimistic_sync/core.go new file mode 100644 index 00000000000..fad5723f476 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/core.go @@ -0,0 +1,316 @@ +package optimistic_sync + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/module/state_synchronization/requester" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +// TODO: DefaultTxResultErrMsgsRequestTimeout should be configured in future PR`s + +const DefaultTxResultErrMsgsRequestTimeout = 5 * time.Second + +// Core defines the interface for pipeline processing steps. +// Each implementation should handle an execution data and implement the three-phase processing: +// download, index, and persist. +// CAUTION: The Core instance should not be used after Abandon is called as it could cause panic due to cleared data. +// Core implementations must be +// - CONCURRENCY SAFE +type Core interface { + // Download retrieves all necessary data for processing. + // Concurrency safe - all operations will be executed sequentially. + // + // Expected errors: + // - context.Canceled: if the provided context was canceled before completion + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + Download(ctx context.Context) error + + // Index processes the downloaded data and creates in-memory indexes. + // Concurrency safe - all operations will be executed sequentially. + // + // No errors are expected during normal operations + Index() error + + // Persist stores the indexed data in permanent storage. + // Concurrency safe - all operations will be executed sequentially. + // + // No errors are expected during normal operations + Persist() error + + // Abandon indicates that the protocol has abandoned this state. Hence processing will be aborted + // and any data dropped. + // Concurrency safe - all operations will be executed sequentially. + // CAUTION: The Core instance should not be used after Abandon is called as it could cause panic due to cleared data. + // + // No errors are expected during normal operations + Abandon() error +} + +// workingData encapsulates all components and temporary storage +// involved in processing a single block's execution data. 
When processing +// is complete or abandoned, the entire workingData can be discarded. +type workingData struct { + // Temporary in-memory caches + inmemRegisters *unsynchronized.Registers + inmemEvents *unsynchronized.Events + inmemCollections *unsynchronized.Collections + inmemTransactions *unsynchronized.Transactions + inmemResults *unsynchronized.LightTransactionResults + inmemTxResultErrMsgs *unsynchronized.TransactionResultErrorMessages + + // Active processing components + execDataRequester requester.ExecutionDataRequester + txResultErrMsgsRequester tx_error_messages.Requester + txResultErrMsgsRequestTimeout time.Duration + indexer *indexer.InMemoryIndexer + blockPersister *persisters.BlockPersister + registersPersister *persisters.RegistersPersister + + // Working data + executionData *execution_data.BlockExecutionDataEntity + txResultErrMsgsData []flow.TransactionResultErrorMessage +} + +var _ Core = (*CoreImpl)(nil) + +// CoreImpl implements the Core interface for processing execution data. +// It coordinates the download, indexing, and persisting of execution data. +// Concurrency safe - all operations will be executed sequentially. +// CAUTION: The CoreImpl instance should not be used after Abandon is called as it could cause panic due to cleared data. +type CoreImpl struct { + log zerolog.Logger + mu sync.Mutex + + workingData *workingData + + executionResult *flow.ExecutionResult + header *flow.Header +} + +// NewCoreImpl creates a new CoreImpl with all necessary dependencies +// Concurrency safe - all operations will be executed sequentially. +func NewCoreImpl( + logger zerolog.Logger, + executionResult *flow.ExecutionResult, + header *flow.Header, + execDataRequester requester.ExecutionDataRequester, + txResultErrMsgsRequester tx_error_messages.Requester, + txResultErrMsgsRequestTimeout time.Duration, + persistentRegisters storage.RegisterIndex, + persistentEvents storage.Events, + persistentCollections storage.Collections, + persistentResults storage.LightTransactionResults, + persistentTxResultErrMsg storage.TransactionResultErrorMessages, + latestPersistedSealedResult storage.LatestPersistedSealedResult, + protocolDB storage.DB, + lockManager storage.LockManager, +) *CoreImpl { + coreLogger := logger.With(). + Str("component", "execution_data_core"). + Str("execution_result_id", executionResult.ID().String()). + Str("block_id", executionResult.BlockID.String()). + Uint64("height", header.Height). 
+ Logger() + + inmemRegisters := unsynchronized.NewRegisters(header.Height) + inmemEvents := unsynchronized.NewEvents() + inmemTransactions := unsynchronized.NewTransactions() + inmemCollections := unsynchronized.NewCollections(inmemTransactions) + inmemResults := unsynchronized.NewLightTransactionResults() + inmemTxResultErrMsgs := unsynchronized.NewTransactionResultErrorMessages() + + indexerComponent := indexer.NewInMemoryIndexer( + coreLogger, + inmemRegisters, + inmemEvents, + inmemCollections, + inmemResults, + inmemTxResultErrMsgs, + executionResult, + header, + lockManager, + ) + + persisterStores := []stores.PersisterStore{ + stores.NewEventsStore(inmemEvents, persistentEvents, executionResult.BlockID), + stores.NewResultsStore(inmemResults, persistentResults, executionResult.BlockID), + stores.NewCollectionsStore(inmemCollections, persistentCollections, lockManager), + stores.NewTxResultErrMsgStore(inmemTxResultErrMsgs, persistentTxResultErrMsg, executionResult.BlockID), + stores.NewLatestSealedResultStore(latestPersistedSealedResult, executionResult.ID(), header.Height), + } + + blockPersister := persisters.NewBlockPersister( + coreLogger, + protocolDB, + lockManager, + executionResult, + header, + persisterStores, + ) + + registerPersister := persisters.NewRegistersPersister(inmemRegisters, persistentRegisters, header.Height) + + return &CoreImpl{ + log: coreLogger, + workingData: &workingData{ + execDataRequester: execDataRequester, + txResultErrMsgsRequester: txResultErrMsgsRequester, + txResultErrMsgsRequestTimeout: txResultErrMsgsRequestTimeout, + indexer: indexerComponent, + blockPersister: blockPersister, + registersPersister: registerPersister, + inmemRegisters: inmemRegisters, + inmemEvents: inmemEvents, + inmemCollections: inmemCollections, + inmemTransactions: inmemTransactions, + inmemResults: inmemResults, + inmemTxResultErrMsgs: inmemTxResultErrMsgs, + }, + executionResult: executionResult, + header: header, + } +} + +// Download downloads execution data and transaction results error for the block +// Concurrency safe - all operations will be executed sequentially. +// +// Expected errors: +// - context.Canceled: if the provided context was canceled before completion +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (c *CoreImpl) Download(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + c.log.Debug().Msg("downloading execution data") + + g, gCtx := errgroup.WithContext(ctx) + + var executionData *execution_data.BlockExecutionData + g.Go(func() error { + var err error + executionData, err = c.workingData.execDataRequester.RequestExecutionData(gCtx) + // executionData are CRITICAL. Any failure here causes the entire download to fail. + if err != nil { + return fmt.Errorf("failed to request execution data: %w", err) + } + + return nil + }) + + var txResultErrMsgsData []flow.TransactionResultErrorMessage + g.Go(func() error { + timeoutCtx, cancel := context.WithTimeout(gCtx, c.workingData.txResultErrMsgsRequestTimeout) + defer cancel() + + var err error + txResultErrMsgsData, err = c.workingData.txResultErrMsgsRequester.Request(timeoutCtx) + if err != nil { + // txResultErrMsgsData are OPTIONAL. Timeout error `context.DeadlineExceeded` is handled gracefully by + // returning nil, allowing processing to continue with empty error messages data. Other errors still cause + // failure. 
+ // + // This approach ensures that temporary unavailability of transaction result error messages doesn't block + // critical execution data processing. + if errors.Is(err, context.DeadlineExceeded) { + c.log.Debug(). + Dur("timeout", c.workingData.txResultErrMsgsRequestTimeout). + Msg("transaction result error messages request timed out") + return nil + } + + return fmt.Errorf("failed to request transaction result error messages data: %w", err) + } + return nil + }) + + if err := g.Wait(); err != nil { + return err + } + + c.workingData.executionData = execution_data.NewBlockExecutionDataEntity(c.executionResult.ExecutionDataID, executionData) + c.workingData.txResultErrMsgsData = txResultErrMsgsData + + c.log.Debug().Msg("successfully downloaded execution data") + + return nil +} + +// Index retrieves the downloaded execution data and transaction results error messages from the caches and indexes them +// into in-memory storage. +// Concurrency safe - all operations will be executed sequentially. +// +// No errors are expected during normal operations +func (c *CoreImpl) Index() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.workingData.executionData == nil { + return fmt.Errorf("could not index an empty execution data") + } + c.log.Debug().Msg("indexing execution data") + + if err := c.workingData.indexer.IndexBlockData(c.workingData.executionData); err != nil { + return err + } + + // Only index transaction result error messages when they are available + if len(c.workingData.txResultErrMsgsData) > 0 { + if err := c.workingData.indexer.IndexTxResultErrorMessagesData(c.workingData.txResultErrMsgsData); err != nil { + return err + } + } + + c.log.Debug().Msg("successfully indexed execution data") + + return nil +} + +// Persist persists the indexed data to permanent storage atomically. +// Concurrency safe - all operations will be executed sequentially. +// +// No errors are expected during normal operations +func (c *CoreImpl) Persist() error { + c.mu.Lock() + defer c.mu.Unlock() + c.log.Debug().Msg("persisting execution data") + + if err := c.workingData.registersPersister.Persist(); err != nil { + return fmt.Errorf("failed to persist register data: %w", err) + } + + if err := c.workingData.blockPersister.Persist(); err != nil { + return fmt.Errorf("failed to persist block data: %w", err) + } + + c.log.Debug().Msg("successfully persisted execution data") + + return nil +} + +// Abandon indicates that the protocol has abandoned this state. Hence processing will be aborted +// and any data dropped. +// Concurrency safe - all operations will be executed sequentially. +// CAUTION: The CoreImpl instance should not be used after Abandon is called as it could cause panic due to cleared data. 
+// +// No errors are expected during normal operations +func (c *CoreImpl) Abandon() error { + c.mu.Lock() + // Clear in-memory storage and other processing data by setting workingData references to nil for garbage collection + c.workingData = nil + c.mu.Unlock() + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/core_impl_test.go b/module/executiondatasync/optimistic_sync/core_impl_test.go new file mode 100644 index 00000000000..a44615e1ad6 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/core_impl_test.go @@ -0,0 +1,342 @@ +package optimistic_sync + +import ( + "context" + "testing" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + txerrmsgsmock "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + reqestermock "github.com/onflow/flow-go/module/state_synchronization/requester/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// CoreImplSuite is a test suite for testing the CoreImpl. +type CoreImplSuite struct { + suite.Suite + logger zerolog.Logger + execDataRequester *reqestermock.ExecutionDataRequester + txResultErrMsgsRequester *txerrmsgsmock.Requester + txResultErrMsgsRequestTimeout time.Duration + db *storagemock.DB + lockManager lockctx.Manager + persistentRegisters *storagemock.RegisterIndex + persistentEvents *storagemock.Events + persistentCollections *storagemock.Collections + persistentTransactions *storagemock.Transactions + persistentResults *storagemock.LightTransactionResults + persistentTxResultErrMsg *storagemock.TransactionResultErrorMessages + latestPersistedSealedResult *storagemock.LatestPersistedSealedResult +} + +func TestCoreImplSuiteSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(CoreImplSuite)) +} + +func (c *CoreImplSuite) SetupTest() { + c.lockManager = storage.NewTestingLockManager() + t := c.T() + c.logger = zerolog.Nop() + + c.execDataRequester = reqestermock.NewExecutionDataRequester(t) + c.txResultErrMsgsRequester = txerrmsgsmock.NewRequester(t) + c.txResultErrMsgsRequestTimeout = DefaultTxResultErrMsgsRequestTimeout + + c.db = storagemock.NewDB(t) + c.db.On("WithReaderBatchWriter", mock.Anything).Return( + func(fn func(storage.ReaderBatchWriter) error) error { + return fn(storagemock.NewBatch(t)) + }, + ).Maybe() + + // Create storage mocks with proper expectations for persist operations + c.persistentRegisters = storagemock.NewRegisterIndex(t) + c.persistentEvents = storagemock.NewEvents(t) + c.persistentCollections = storagemock.NewCollections(t) + c.persistentTransactions = storagemock.NewTransactions(t) + c.persistentResults = storagemock.NewLightTransactionResults(t) + c.persistentTxResultErrMsg = storagemock.NewTransactionResultErrorMessages(t) + c.latestPersistedSealedResult = storagemock.NewLatestPersistedSealedResult(t) + + // Set up default expectations for persist operations + // These will be called by the real Persister during Persist() + c.persistentRegisters.On("Store", mock.Anything, mock.Anything).Return(nil).Maybe() + c.persistentEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + c.persistentCollections.On("BatchStoreLightAndIndexByTransaction", mock.Anything, 
mock.Anything).Return(nil).Maybe() + c.persistentTransactions.On("BatchStore", mock.Anything, mock.Anything).Return(nil).Maybe() + c.persistentResults.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + c.persistentTxResultErrMsg.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + c.latestPersistedSealedResult.On("BatchSet", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() +} + +// createTestCoreImpl creates a CoreImpl instance with mocked dependencies for testing. +// +// Returns a configured CoreImpl ready for testing. +func (c *CoreImplSuite) createTestCoreImpl() *CoreImpl { + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture(unittest.WithBlock(&block)) + + return NewCoreImpl( + c.logger, + executionResult, + block.Header, + c.execDataRequester, + c.txResultErrMsgsRequester, + c.txResultErrMsgsRequestTimeout, + c.persistentRegisters, + c.persistentEvents, + c.persistentCollections, + c.persistentResults, + c.persistentTxResultErrMsg, + c.latestPersistedSealedResult, + c.db, + c.lockManager, + ) +} + +// TestCoreImpl_Download tests the Download method which retrieves execution data and transaction error messages. +func (c *CoreImplSuite) TestCoreImpl_Download() { + c.Run("successful download", func() { + core := c.createTestCoreImpl() + ctx := context.Background() + + expectedExecutionData := unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(core.header.ID())) + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once() + + expectedTxResultErrMsgs := unittest.TransactionResultErrorMessagesFixture(1) + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(expectedTxResultErrMsgs, nil).Once() + + err := core.Download(ctx) + c.Require().NoError(err) + + c.Assert().Equal(expectedExecutionData, core.workingData.executionData.BlockExecutionData) + c.Assert().Equal(expectedTxResultErrMsgs, core.workingData.txResultErrMsgsData) + }) + + c.Run("execution data request error", func() { + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return((*execution_data.BlockExecutionData)(nil), assert.AnError).Once() + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(([]flow.TransactionResultErrorMessage)(nil), nil).Once() + + ctx := context.Background() + core := c.createTestCoreImpl() + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, assert.AnError) + c.Assert().Contains(err.Error(), "failed to request execution data") + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("transaction result error messages request error", func() { + expectedExecutionData := unittest.BlockExecutionDataFixture() + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once() + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(([]flow.TransactionResultErrorMessage)(nil), assert.AnError).Once() + + ctx := context.Background() + core := c.createTestCoreImpl() + + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, assert.AnError) + c.Assert().Contains(err.Error(), "failed to request transaction result error messages data") + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("context cancellation", func() { + core := c.createTestCoreImpl() + ctx, cancel := 
context.WithCancel(context.Background()) + cancel() + + expectedExecutionData := unittest.BlockExecutionDataFixture() + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, ctx.Err()).Once() + + expectedTxResultErrMsgs := unittest.TransactionResultErrorMessagesFixture(1) + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(expectedTxResultErrMsgs, ctx.Err()).Once() + + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, context.Canceled) + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("txResultErrMsgsRequestTimeout expiration", func() { + c.txResultErrMsgsRequestTimeout = 100 * time.Millisecond + + expectedExecutionData := unittest.BlockExecutionDataFixture() + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once() + + // Transaction result error messages request times out + c.txResultErrMsgsRequester.On("Request", mock.MatchedBy(func(ctx context.Context) bool { + // Verify we received a context with timeout + deadline, hasDeadline := ctx.Deadline() + if !hasDeadline { + return false + } + // Verify the timeout is approximately what we expect + timeUntilDeadline := time.Until(deadline) + return timeUntilDeadline > 0 && timeUntilDeadline <= c.txResultErrMsgsRequestTimeout + })).Run(func(args mock.Arguments) { + // Simulate a slow request by sleeping longer than the timeout + time.Sleep(2 * c.txResultErrMsgsRequestTimeout) + }).Return(([]flow.TransactionResultErrorMessage)(nil), context.DeadlineExceeded).Once() + + core := c.createTestCoreImpl() + ctx := context.Background() + + var err error + unittest.AssertReturnsBefore(c.T(), func() { + err = core.Download(ctx) + }, time.Second) + + c.Require().NoError(err) + c.Assert().Equal(expectedExecutionData, core.workingData.executionData.BlockExecutionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) +} + +// TestCoreImpl_Index tests the Index method which processes downloaded data. 
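// requestOptionalSketch is a standalone, hypothetical illustration of the pattern the
// Download phase uses for the OPTIONAL transaction result error messages exercised above:
// the request runs under its own deadline, a timeout is treated as "no data" so processing
// can continue, and every other failure is propagated. The helper name is an assumption,
// and it presumes the errors package is imported alongside context and time.
func requestOptionalSketch[T any](ctx context.Context, timeout time.Duration, request func(context.Context) (T, error)) (T, error) {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	data, err := request(timeoutCtx)
	if errors.Is(err, context.DeadlineExceeded) {
		// optional payload: proceed without it
		var zero T
		return zero, nil
	}
	return data, err
}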
+func (c *CoreImplSuite) TestCoreImpl_Index() { + c.Run("successful indexing", func() { + core := c.createTestCoreImpl() + + // Create execution data with the SAME block ID as the execution result + expectedExecutionData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(core.executionResult.BlockID), + ) + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once() + + expectedTxResultErrMsgs := unittest.TransactionResultErrorMessagesFixture(1) + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(expectedTxResultErrMsgs, nil).Once() + + ctx := context.Background() + err := core.Download(ctx) + c.Require().NoError(err) + + err = core.Index() + c.Require().NoError(err) + }) + + c.Run("block ID mismatch", func() { + core := c.createTestCoreImpl() + + // Create execution data with a DIFFERENT block ID than expected + executionData := unittest.BlockExecutionDataFixture() + core.workingData.executionData = execution_data.NewBlockExecutionDataEntity( + unittest.IdentifierFixture(), + executionData, + ) + + err := core.Index() + c.Require().Error(err) + + c.Assert().Contains(err.Error(), "invalid block execution data") + c.Assert().Contains(err.Error(), "expected block_id") + }) + + c.Run("execution data is empty", func() { + core := c.createTestCoreImpl() + + // Do not download data, just index it + err := core.Index() + c.Require().Error(err) + + c.Assert().Contains(err.Error(), "could not index an empty execution data") + }) +} + +// TestCoreImpl_Persist tests the Persist method which persists indexed data to storages and database. +func (c *CoreImplSuite) TestCoreImpl_Persist() { + t := c.T() + + c.Run("successful persistence of empty data", func() { + // Create mocks with proper expectations + c.db = storagemock.NewDB(t) + c.db.On("WithReaderBatchWriter", mock.Anything).Return(nil) + + core := c.createTestCoreImpl() + err := core.Persist() + + c.Require().NoError(err) + }) + + c.Run("persistence with batch commit failure", func() { + // Create a failing DB + c.db = storagemock.NewDB(t) + c.db.On("WithReaderBatchWriter", mock.Anything).Return(assert.AnError) + + // Create CoreImpl with the failing DB + core := c.createTestCoreImpl() + + err := core.Persist() + c.Require().Error(err) + + c.Assert().ErrorIs(err, assert.AnError) + c.Assert().Contains(err.Error(), "failed to persist block data") + }) +} + +// TestCoreImpl_Abandon tests the Abandon method which clears all references for garbage collection. +func (c *CoreImplSuite) TestCoreImpl_Abandon() { + core := c.createTestCoreImpl() + + core.workingData.executionData = unittest.BlockExecutionDatEntityFixture() + core.workingData.txResultErrMsgsData = unittest.TransactionResultErrorMessagesFixture(1) + + err := core.Abandon() + c.Require().NoError(err) + + c.Assert().Nil(core.workingData) +} + +// TestCoreImpl_IntegrationWorkflow tests the complete workflow of download -> index -> persist operations. 
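// coreLifecycleSketch is a hypothetical driver showing the three-phase processing the
// Core interface describes: Download, then Index, then either Persist (the result is kept)
// or Abandon (the fork is dropped). The shouldPersist flag stands in for the pipeline's
// real decision; after Abandon the Core must not be used again.
func coreLifecycleSketch(ctx context.Context, core Core, shouldPersist bool) error {
	if err := core.Download(ctx); err != nil {
		return err
	}
	if err := core.Index(); err != nil {
		return err
	}
	if !shouldPersist {
		return core.Abandon()
	}
	return core.Persist()
}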
+func (c *CoreImplSuite) TestCoreImpl_IntegrationWorkflow() { + t := c.T() + + // Set up mocks with proper expectations + c.db = storagemock.NewDB(t) + c.db.On("WithReaderBatchWriter", mock.Anything).Return( + func(fn func(storage.ReaderBatchWriter) error) error { + return fn(storagemock.NewBatch(t)) + }, + ).Maybe() + + core := c.createTestCoreImpl() + ctx := context.Background() + + // Create execution data with the SAME block ID as the execution result + executionData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(core.executionResult.BlockID), + ) + txResultErrMsgs := unittest.TransactionResultErrorMessagesFixture(1) + + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(executionData, nil).Once() + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(txResultErrMsgs, nil).Once() + + err := core.Download(ctx) + c.Require().NoError(err) + + err = core.Index() + c.Require().NoError(err) + + err = core.Persist() + c.Require().NoError(err) + + c.Assert().NotNil(core.workingData.executionData) + c.Assert().Equal(executionData, core.workingData.executionData.BlockExecutionData) + c.Assert().Equal(txResultErrMsgs, core.workingData.txResultErrMsgsData) +} diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider.go new file mode 100644 index 00000000000..db0100f1530 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider.go @@ -0,0 +1,51 @@ +package optimistic_sync + +import "github.com/onflow/flow-go/model/flow" + +// Criteria defines the filtering criteria for execution result queries. +// It specifies requirements for execution result selection including the number +// of agreeing executors and required executor nodes. +type Criteria struct { + // AgreeingExecutorsCount is the number of receipts including the same ExecutionResult + AgreeingExecutorsCount uint + // RequiredExecutors is the list of EN node IDs, one of which must have produced the result + RequiredExecutors flow.IdentifierList +} + +// OverrideWith overrides the original criteria with the incoming criteria, returning a new Criteria object. +// Fields from `override` criteria take precedence when set. +func (c *Criteria) OverrideWith(override Criteria) Criteria { + newCriteria := *c + + if override.AgreeingExecutorsCount > 0 { + newCriteria.AgreeingExecutorsCount = override.AgreeingExecutorsCount + } + + if len(override.RequiredExecutors) > 0 { + newCriteria.RequiredExecutors = override.RequiredExecutors + } + + return newCriteria +} + +// Query contains the result of an execution result query. +// It includes both the execution result and the execution nodes that produced it. +type Query struct { + // ExecutionResult is the execution result for the queried block + ExecutionResult *flow.ExecutionResult + // ExecutionNodes is the list of execution node identities that produced the result + ExecutionNodes flow.IdentitySkeletonList +} + +// ExecutionResultQueryProvider provides execution results and execution nodes based on criteria. +// It allows querying for execution results by block ID with specific filtering criteria +// to ensure consistency and reliability of execution results. +type ExecutionResultQueryProvider interface { + // ExecutionResultQuery retrieves execution results and associated execution nodes for a given block ID + // based on the provided criteria.
It returns a Query containing the execution result and + // the execution nodes that produced it. + // + // Expected errors during normal operations: + // - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID. + ExecutionResultQuery(blockID flow.Identifier, criteria Criteria) (*Query, error) +} diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go new file mode 100644 index 00000000000..b3f647a00cd --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go @@ -0,0 +1,127 @@ +package execution_result_query_provider + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" +) + +const ( + // defaultMaxNodesCnt is the maximum number of nodes that will be contacted to complete an API request. + defaultMaxNodesCnt = 3 +) + +// ExecutionNodesSelector handles the selection of execution nodes based on preferences and requirements. +// It encapsulates the logic for choosing execution nodes based on operator preferences, operator requirements, +// and user requirements. +type ExecutionNodesSelector struct { + // preferredENIdentifiers are the execution nodes that the operator prefers to use + preferredENIdentifiers flow.IdentifierList + // requiredENIdentifiers are the execution nodes that the operator requires to use + requiredENIdentifiers flow.IdentifierList + // maxNodesCnt is the maximum number of nodes to select + maxNodesCnt int +} + +// NewExecutionNodes creates a new ExecutionNodesSelector with the provided configuration. +func NewExecutionNodes( + preferredENIdentifiers flow.IdentifierList, + requiredENIdentifiers flow.IdentifierList, +) *ExecutionNodesSelector { + return &ExecutionNodesSelector{ + preferredENIdentifiers: preferredENIdentifiers, + requiredENIdentifiers: requiredENIdentifiers, + maxNodesCnt: defaultMaxNodesCnt, + } +} + +// SelectExecutionNodes finds the subset of execution nodes defined in the identity table that matches +// the provided executor IDs and executor criteria. +// +// The following precedence is used to determine the subset of execution nodes: +// +// 1. If the user's RequiredExecutors is not empty, only select executors from their list +// +// 2. If the operator's `requiredENIdentifiers` is set, only select executors from the required ENs list. +// If the operator's `preferredENIdentifiers` is also set, then the preferred ENs are selected first. +// +// 3. If only the operator's `preferredENIdentifiers` is set, then select any preferred ENs that +// have executed the result, and fall back to selecting any ENs that have executed the result. +// +// 4. If neither preferred nor required nodes are defined, then all execution nodes matching the +// executor IDs are returned. +// +// No errors are expected during normal operations +func (en *ExecutionNodesSelector) SelectExecutionNodes( + executors flow.IdentityList, + userRequiredExecutors flow.IdentifierList, +) (flow.IdentitySkeletonList, error) { + var chosenIDs flow.IdentityList + + // first, check if the user's criteria included any required executors. + // since the result is chosen based on the user's required executors, this should always return + // at least one match. 
+ if len(userRequiredExecutors) > 0 { + chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](userRequiredExecutors...)) + return chosenIDs.ToSkeleton(), nil + } + + // if required ENs are set, only select executors from the required ENs list + // similarly, if the user does not provide any required executors, then the operator's + // `en.requiredENIdentifiers` are applied, so this should always return at least one match. + if len(en.requiredENIdentifiers) > 0 { + chosenIDs = en.selectFromRequiredENIDs(executors) + return chosenIDs.ToSkeleton(), nil + } + + // if only preferred ENs are set, then select any preferred ENs that have executed the result, + // and fall back to selecting any executors. + if len(en.preferredENIdentifiers) > 0 { + chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](en.preferredENIdentifiers...)) + if len(chosenIDs) >= en.maxNodesCnt { + return chosenIDs.ToSkeleton(), nil + } + } + + // finally, fill up with any remaining executors that produced the result + chosenIDs = en.mergeExecutionNodes(chosenIDs, executors) + return chosenIDs.ToSkeleton(), nil +} + +// selectFromRequiredENIDs finds the subset of the provided executors that match the required ENs. +// if `en.preferredENIdentifiers` is not empty, then any preferred ENs that have executed the result +// will be added to the subset. +// otherwise, any executor in the `en.requiredENIdentifiers` list will be returned. +func (en *ExecutionNodesSelector) selectFromRequiredENIDs( + executors flow.IdentityList, +) flow.IdentityList { + var chosenIDs flow.IdentityList + + // add any preferred ENs that have executed the result and return if there are enough nodes + // if both preferred and required ENs are set, then preferred MUST be a subset of required + if len(en.preferredENIdentifiers) > 0 { + chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](en.preferredENIdentifiers...)) + if len(chosenIDs) >= en.maxNodesCnt { + return chosenIDs + } + } + + // next, add any other required ENs that have executed the result + executedRequired := executors.Filter(filter.HasNodeID[flow.Identity](en.requiredENIdentifiers...)) + chosenIDs = en.mergeExecutionNodes(chosenIDs, executedRequired) + + return chosenIDs +} + +// mergeExecutionNodes adds nodes to chosenIDs if they are not already included +func (en *ExecutionNodesSelector) mergeExecutionNodes(chosenIDs, candidates flow.IdentityList) flow.IdentityList { + for _, candidateNode := range candidates { + if _, exists := chosenIDs.ByNodeID(candidateNode.NodeID); !exists { + chosenIDs = append(chosenIDs, candidateNode) + if len(chosenIDs) >= en.maxNodesCnt { + break + } + } + } + + return chosenIDs +} diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go new file mode 100644 index 00000000000..130ce354646 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go @@ -0,0 +1,213 @@ +package execution_result_query_provider + +import ( + "fmt" + "sort" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// DefaultCriteria is the system default criteria for execution
result queries. +var DefaultCriteria = optimistic_sync.Criteria{ + AgreeingExecutorsCount: 2, +} + +var _ optimistic_sync.ExecutionResultQueryProvider = (*ExecutionResultQueryProvider)(nil) + +// ExecutionResultQueryProvider is a container for elements required to retrieve +// execution results and execution node identities for a given block ID based on specified criteria. +type ExecutionResultQueryProvider struct { + log zerolog.Logger + + executionReceipts storage.ExecutionReceipts + state protocol.State + + executionNodes *ExecutionNodesSelector + + rootBlockID flow.Identifier + rootBlockResult *flow.ExecutionResult + + baseCriteria optimistic_sync.Criteria +} + +// NewExecutionResultQueryProvider creates and returns a new instance of +// ExecutionResultQueryProvider. +// +// No errors are expected during normal operations +func NewExecutionResultQueryProvider( + log zerolog.Logger, + state protocol.State, + headers storage.Headers, + executionReceipts storage.ExecutionReceipts, + executionNodes *ExecutionNodesSelector, + operatorCriteria optimistic_sync.Criteria, +) (*ExecutionResultQueryProvider, error) { + // Root block ID and result should not change and could be cached. + sporkRootBlockHeight := state.Params().SporkRootBlockHeight() + rootBlockID, err := headers.BlockIDByHeight(sporkRootBlockHeight) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block ID by height: %w", err) + } + + rootBlockResult, _, err := state.AtBlockID(rootBlockID).SealedResult() + if err != nil { + return nil, fmt.Errorf("failed to retrieve root block result: %w", err) + } + + return &ExecutionResultQueryProvider{ + log: log.With().Str("module", "execution_result_query").Logger(), + executionReceipts: executionReceipts, + state: state, + executionNodes: executionNodes, + rootBlockID: rootBlockID, + rootBlockResult: rootBlockResult, + baseCriteria: DefaultCriteria.OverrideWith(operatorCriteria), + }, nil +} + +// ExecutionResultQuery retrieves execution results and associated execution nodes for a given block ID +// based on the provided criteria. +// +// Expected errors during normal operations: +// - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID. +func (e *ExecutionResultQueryProvider) ExecutionResultQuery(blockID flow.Identifier, criteria optimistic_sync.Criteria) (*optimistic_sync.Query, error) { + executorIdentities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve execution IDs for root block: %w", err) + } + + // if the block ID is the root block, then use the root ExecutionResult and skip the receipt + // check since there will not be any. 
+ if e.rootBlockID == blockID { + subsetENs, err := e.executionNodes.SelectExecutionNodes(executorIdentities, criteria.RequiredExecutors) + if err != nil { + return nil, fmt.Errorf("failed to choose execution nodes for root block ID %v: %w", e.rootBlockID, err) + } + + return &optimistic_sync.Query{ + ExecutionResult: e.rootBlockResult, + ExecutionNodes: subsetENs, + }, nil + } + + result, executorIDs, err := e.findResultAndExecutors(blockID, criteria) + if err != nil { + return nil, fmt.Errorf("failed to find result and executors for block ID %v: %w", blockID, err) + } + + executors := executorIdentities.Filter(filter.HasNodeID[flow.Identity](executorIDs...)) + subsetENs, err := e.executionNodes.SelectExecutionNodes(executors, criteria.RequiredExecutors) + if err != nil { + return nil, fmt.Errorf("failed to choose execution nodes for block ID %v: %w", blockID, err) + } + + if len(subsetENs) == 0 { + // this is unexpected, and probably indicates there is a bug. + // There are only three ways that SelectExecutionNodes can return an empty list: + // 1. there are no executors for the result + // 2. none of the user's required executors are in the executor list + // 3. none of the operator's required executors are in the executor list + // None of these are possible since there must be at least one AgreeingExecutorsCount. If the + // criteria is met, then there must be at least one acceptable executor. If this is not true, + // then the criteria check must fail. + return nil, fmt.Errorf("no execution nodes found for result %v (blockID: %v): %w", result.ID(), blockID, err) + } + + return &optimistic_sync.Query{ + ExecutionResult: result, + ExecutionNodes: subsetENs, + }, nil +} + +// findResultAndExecutors returns a query response for a given block ID. +// The result must match the provided criteria and have at least one acceptable executor. If multiple +// results are found, then the result with the most executors is returned. +// +// Expected errors during normal operations: +// - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID. +func (e *ExecutionResultQueryProvider) findResultAndExecutors( + blockID flow.Identifier, + criteria optimistic_sync.Criteria, +) (*flow.ExecutionResult, flow.IdentifierList, error) { + type result struct { + result *flow.ExecutionResult + receipts flow.ExecutionReceiptList + } + + criteria = e.baseCriteria.OverrideWith(criteria) + + // Note: this will return an empty slice with no error if no receipts are found. 
+ allReceipts, err := e.executionReceipts.ByBlockID(blockID) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve execution receipts for block ID %v: %w", blockID, err) + } + + // find all results that match the criteria and have at least one acceptable executor + results := make([]result, 0) + for _, executionReceiptList := range allReceipts.GroupByResultID() { + executorGroup := executionReceiptList.GroupByExecutorID() + if isExecutorGroupMeetingCriteria(executorGroup, criteria) { + results = append(results, result{ + result: &executionReceiptList[0].ExecutionResult, + receipts: executionReceiptList, + }) + } + } + + if len(results) == 0 { + return nil, nil, common.NewInsufficientExecutionReceipts(blockID, 0) + } + + // sort results by the number of execution nodes in descending order + sort.Slice(results, func(i, j int) bool { + return len(results[i].receipts) > len(results[j].receipts) + }) + + executorIDs := getExecutorIDs(results[0].receipts) + return results[0].result, executorIDs, nil +} + +// isExecutorGroupMeetingCriteria checks if an executor group meets the specified criteria for execution receipts matching. +func isExecutorGroupMeetingCriteria(executorGroup flow.ExecutionReceiptGroupedList, criteria optimistic_sync.Criteria) bool { + if uint(len(executorGroup)) < criteria.AgreeingExecutorsCount { + return false + } + + if len(criteria.RequiredExecutors) > 0 { + hasRequiredExecutor := false + for _, requiredExecutor := range criteria.RequiredExecutors { + if _, ok := executorGroup[requiredExecutor]; ok { + hasRequiredExecutor = true + break + } + } + if !hasRequiredExecutor { + return false + } + } + + // TODO: Implement the `ResultInFork` check here, which iteratively checks ancestors to determine if + // the current result's fork includes the requested result. https://github.com/onflow/flow-go/issues/7587 + + return true +} + +// getExecutorIDs extracts unique executor node IDs from a list of execution receipts. +// It groups receipts by executor ID and returns all unique executor identifiers. +func getExecutorIDs(receipts flow.ExecutionReceiptList) flow.IdentifierList { + receiptGroupedByExecutorID := receipts.GroupByExecutorID() + + executorIDs := make(flow.IdentifierList, 0, len(receiptGroupedByExecutorID)) + for executorID := range receiptGroupedByExecutorID { + executorIDs = append(executorIDs, executorID) + } + + return executorIDs +} diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go new file mode 100644 index 00000000000..01099c1c77f --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go @@ -0,0 +1,332 @@ +package execution_result_query_provider + +import ( + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// ExecutionResultQueryProviderSuite is a test suite for testing the ExecutionResultQueryProvider.
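// queryUsageSketch is a hypothetical caller of the provider defined above: it asks for an
// execution result for blockID that at least three executors agree on and that was produced
// by one of the caller's trusted executors. The per-request criteria are layered over the
// operator's base criteria via OverrideWith, so any unset field falls back to the operator
// or system defaults. The helper name and the concrete count are illustrative only.
func queryUsageSketch(
	provider optimistic_sync.ExecutionResultQueryProvider,
	blockID flow.Identifier,
	trustedExecutors flow.IdentifierList,
) (*optimistic_sync.Query, error) {
	return provider.ExecutionResultQuery(blockID, optimistic_sync.Criteria{
		AgreeingExecutorsCount: 3,
		RequiredExecutors:      trustedExecutors,
	})
}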
+type ExecutionResultQueryProviderSuite struct { + suite.Suite + + state *protocol.State + snapshot *protocol.Snapshot + params *protocol.Params + log zerolog.Logger + + receipts *storagemock.ExecutionReceipts + headers *storagemock.Headers + + rootBlock *flow.Header + rootBlockResult *flow.ExecutionResult +} + +func TestExecutionResultQueryProvider(t *testing.T) { + suite.Run(t, new(ExecutionResultQueryProviderSuite)) +} + +// SetupTest initializes the test suite with mock state and receipts storage. +func (suite *ExecutionResultQueryProviderSuite) SetupTest() { + t := suite.T() + suite.log = zerolog.New(zerolog.NewConsoleWriter()) + suite.state = protocol.NewState(t) + suite.snapshot = protocol.NewSnapshot(t) + suite.params = protocol.NewParams(t) + suite.receipts = storagemock.NewExecutionReceipts(t) + suite.headers = storagemock.NewHeaders(t) + + suite.rootBlock = unittest.BlockHeaderFixture() + rootBlockID := suite.rootBlock.ID() + suite.rootBlockResult = unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(rootBlockID)) + // This will be used just for the root block + suite.snapshot.On("SealedResult").Return(suite.rootBlockResult, nil, nil).Maybe() + suite.state.On("SealedResult", rootBlockID).Return(flow.ExecutionReceiptList{}).Maybe() + suite.params.On("SporkRootBlockHeight").Return(suite.rootBlock.Height, nil) + suite.headers.On("BlockIDByHeight", suite.rootBlock.Height).Return(rootBlockID, nil) + suite.state.On("Params").Return(suite.params) + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("AtBlockID", mock.Anything).Return(suite.snapshot).Maybe() +} + +func (suite *ExecutionResultQueryProviderSuite) createProvider(preferredExecutors flow.IdentifierList, operatorCriteria optimistic_sync.Criteria) *ExecutionResultQueryProvider { + provider, err := NewExecutionResultQueryProvider( + suite.log, + suite.state, + suite.headers, + suite.receipts, + NewExecutionNodes(preferredExecutors, operatorCriteria.RequiredExecutors), + operatorCriteria, + ) + suite.Require().NoError(err) + + return provider +} + +// setupIdentitiesMock sets up the mock for identity-related calls. +func (suite *ExecutionResultQueryProviderSuite) setupIdentitiesMock(allExecutionNodes flow.IdentityList) { + suite.snapshot.On("Identities", mock.Anything).Return( + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { + return allExecutionNodes.Filter(filter) + }, + func(flow.IdentityFilter[flow.Identity]) error { return nil }) +} + +// TestExecutionResultQuery tests the main ExecutionResultQuery function with various scenarios. 
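// agreementSketch is a simplified, hypothetical restatement of the per-result acceptance
// rule exercised in the tests below (see isExecutorGroupMeetingCriteria): enough distinct
// executors must have committed to the candidate result and, when RequiredExecutors is set,
// at least one of them must be among those executors.
func agreementSketch(executors flow.IdentifierList, criteria optimistic_sync.Criteria) bool {
	if uint(len(executors)) < criteria.AgreeingExecutorsCount {
		return false
	}
	if len(criteria.RequiredExecutors) == 0 {
		return true
	}
	for _, required := range criteria.RequiredExecutors {
		for _, executor := range executors {
			if required == executor {
				return true
			}
		}
	}
	return false
}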
+func (suite *ExecutionResultQueryProviderSuite) TestExecutionResultQuery() { + totalReceipts := 5 + block := unittest.BlockFixture() + + // generate execution node identities for each receipt + allExecutionNodes := unittest.IdentityListFixture(totalReceipts, unittest.WithRole(flow.RoleExecution)) + + // create two different execution results to test agreement logic + executionResult := unittest.ExecutionResultFixture() + + suite.Run("query with client required executors", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + + receipts := make(flow.ExecutionReceiptList, totalReceipts) + for i := 0; i < totalReceipts; i++ { + r := unittest.ReceiptForBlockFixture(&block) + r.ExecutorID = allExecutionNodes[i].NodeID + r.ExecutionResult = *executionResult + receipts[i] = r + } + + suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + suite.setupIdentitiesMock(allExecutionNodes) + + // Require specific executors (first two nodes) + requiredExecutors := allExecutionNodes[0:2].NodeIDs() + + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{ + AgreeingExecutorsCount: 2, + RequiredExecutors: requiredExecutors, + }) + suite.Require().NoError(err) + + suite.Assert().ElementsMatch(requiredExecutors, query.ExecutionNodes.NodeIDs()) + }) + + suite.Run("successful query with different block results", func() { + requiredExecutors := allExecutionNodes[0:3].NodeIDs() + + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{ + RequiredExecutors: requiredExecutors, + }) + + otherResult := unittest.ExecutionResultFixture() + // Create 3 receipts with the same result (executionResult) and 2 with a different result (otherResult) + receipts := make(flow.ExecutionReceiptList, totalReceipts) + for i := 0; i < 3; i++ { + r := unittest.ReceiptForBlockFixture(&block) + r.ExecutorID = allExecutionNodes[i].NodeID + r.ExecutionResult = *executionResult + receipts[i] = r + } + for i := 3; i < totalReceipts; i++ { + r := unittest.ReceiptForBlockFixture(&block) + r.ExecutorID = allExecutionNodes[i].NodeID + r.ExecutionResult = *otherResult + receipts[i] = r + } + + suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + suite.setupIdentitiesMock(allExecutionNodes) + + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + suite.Require().Equal(executionResult.ID(), query.ExecutionResult.ID()) + suite.Assert().ElementsMatch(requiredExecutors, query.ExecutionNodes.NodeIDs()) + }) + + suite.Run("insufficient agreeing executors returns error", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + + // Create a fresh block for this test to ensure proper isolation + insufficientBlock := unittest.BlockFixture() + + // Create a scenario where we have receipts but no result has enough agreeing executors + + // Create only 1 receipt with 1 execution result + r := unittest.ReceiptForBlockFixture(&insufficientBlock) + r.ExecutorID = allExecutionNodes[0].NodeID + r.ExecutionResult = *unittest.ExecutionResultFixture() + receipts := flow.ExecutionReceiptList{ + r, + } + + // Set up a separate mock call for this specific block + suite.receipts.On("ByBlockID", insufficientBlock.ID()).Return(receipts, nil).Once() + suite.setupIdentitiesMock(allExecutionNodes) + + _, err := provider.ExecutionResultQuery(insufficientBlock.ID(), optimistic_sync.Criteria{ + AgreeingExecutorsCount: 2, + RequiredExecutors: 
allExecutionNodes[0:1].NodeIDs(), + }) + suite.Require().Error(err) + + suite.Assert().True(common.IsInsufficientExecutionReceipts(err)) + }) + + suite.Run("required executors not found returns error", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + receipts := make(flow.ExecutionReceiptList, totalReceipts) + for i := 0; i < totalReceipts; i++ { + r := unittest.ReceiptForBlockFixture(&block) + r.ExecutorID = allExecutionNodes[i].NodeID + r.ExecutionResult = *executionResult + receipts[i] = r + } + + suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + suite.setupIdentitiesMock(allExecutionNodes) + + // Require executors that didn't produce any receipts + _, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{ + RequiredExecutors: unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)).NodeIDs(), + }) + suite.Require().Error(err) + + suite.Assert().True(common.IsInsufficientExecutionReceipts(err)) + }) +} + +// TestRootBlockHandling tests the special case handling for root blocks. +func (suite *ExecutionResultQueryProviderSuite) TestRootBlockHandling() { + allExecutionNodes := unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleExecution)) + suite.setupIdentitiesMock(allExecutionNodes) + + suite.Run("root block returns execution nodes without execution result", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + + query, err := provider.ExecutionResultQuery(suite.rootBlock.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + suite.Assert().Equal(suite.rootBlockResult, query.ExecutionResult) + suite.Assert().Len(query.ExecutionNodes.NodeIDs(), defaultMaxNodesCnt) + suite.Assert().Subset(allExecutionNodes.NodeIDs(), query.ExecutionNodes.NodeIDs()) + }) + + suite.Run("root block with required executors", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + + requiredExecutors := allExecutionNodes[0:2].NodeIDs() + criteria := optimistic_sync.Criteria{ + AgreeingExecutorsCount: 1, + RequiredExecutors: requiredExecutors, + } + + query, err := provider.ExecutionResultQuery(suite.rootBlock.ID(), criteria) + suite.Require().NoError(err) + + suite.Assert().Equal(suite.rootBlockResult, query.ExecutionResult) + suite.Assert().ElementsMatch(query.ExecutionNodes.NodeIDs(), requiredExecutors) + }) +} + +// TestPreferredAndRequiredExecutionNodes tests the interaction with preferred and required execution nodes. 
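// selectorUsageSketch is an illustrative, hypothetical helper showing the call shape behind
// the scenarios below: the operator wires preferred and required execution node IDs into the
// selector once, and each query narrows the identities that actually produced the result,
// optionally pinned further by the user's RequiredExecutors.
func selectorUsageSketch(
	preferred, required flow.IdentifierList,
	executors flow.IdentityList,
	userRequired flow.IdentifierList,
) (flow.IdentitySkeletonList, error) {
	selector := NewExecutionNodes(preferred, required)
	return selector.SelectExecutionNodes(executors, userRequired)
}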
+func (suite *ExecutionResultQueryProviderSuite) TestPreferredAndRequiredExecutionNodes() { + block := unittest.BlockFixture() + allExecutionNodes := unittest.IdentityListFixture(8, unittest.WithRole(flow.RoleExecution)) + executionResult := unittest.ExecutionResultFixture() + + numReceipts := 6 + // Create receipts from the first `numReceipts` execution nodes + receipts := make(flow.ExecutionReceiptList, numReceipts) + for i := 0; i < numReceipts; i++ { + r := unittest.ReceiptForBlockFixture(&block) + r.ExecutorID = allExecutionNodes[i].NodeID + r.ExecutionResult = *executionResult + receipts[i] = r + } + + suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + suite.setupIdentitiesMock(allExecutionNodes) + + suite.Run("with default optimistic_sync.Criteria", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{}) + + // optimistic_sync.Criteria are empty to use operator defaults + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + expectedExecutors := allExecutionNodes[0:3].NodeIDs() + actualExecutors := query.ExecutionNodes.NodeIDs() + + suite.Assert().Len(actualExecutors, defaultMaxNodesCnt) + suite.Assert().ElementsMatch(expectedExecutors, actualExecutors) + }) + + suite.Run("with operator preferred executors", func() { + provider := suite.createProvider(allExecutionNodes[1:5].NodeIDs(), optimistic_sync.Criteria{}) + + // optimistic_sync.Criteria are empty to use operator defaults + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + actualExecutors := query.ExecutionNodes.NodeIDs() + + suite.Assert().ElementsMatch(provider.executionNodes.preferredENIdentifiers, actualExecutors) + }) + + suite.Run("with operator required executors", func() { + provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{ + RequiredExecutors: allExecutionNodes[5:8].NodeIDs(), + }) + + // optimistic_sync.Criteria are empty to use operator defaults + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + actualExecutors := query.ExecutionNodes.NodeIDs() + + // Just one required executor contains the result + expectedExecutors := provider.executionNodes.requiredENIdentifiers[0:1] + + suite.Assert().ElementsMatch(expectedExecutors, actualExecutors) + }) + + suite.Run("with both: operator preferred & required executors", func() { + provider := suite.createProvider(allExecutionNodes[0:1].NodeIDs(), optimistic_sync.Criteria{ + RequiredExecutors: allExecutionNodes[3:6].NodeIDs(), + }) + + // optimistic_sync.Criteria are empty to use operator defaults + query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{}) + suite.Require().NoError(err) + + // `preferredENIdentifiers` contain 1 executor, that is not enough, so the logic will get 2 executors from `requiredENIdentifiers` to fill `defaultMaxNodesCnt` executors. + expectedExecutors := append(provider.executionNodes.preferredENIdentifiers, provider.executionNodes.requiredENIdentifiers[0:2]...) 
+ actualExecutors := query.ExecutionNodes.NodeIDs() + + suite.Assert().Len(actualExecutors, defaultMaxNodesCnt) + suite.Assert().ElementsMatch(expectedExecutors, actualExecutors) + }) + + suite.Run("with client preferred executors", func() { + provider := suite.createProvider(allExecutionNodes[0:1].NodeIDs(), optimistic_sync.Criteria{ + RequiredExecutors: allExecutionNodes[2:4].NodeIDs(), + }) + + userCriteria := optimistic_sync.Criteria{ + RequiredExecutors: allExecutionNodes[5:6].NodeIDs(), + } + + query, err := provider.ExecutionResultQuery(block.ID(), userCriteria) + suite.Require().NoError(err) + + suite.Assert().ElementsMatch(userCriteria.RequiredExecutors, query.ExecutionNodes.NodeIDs()) + }) +} diff --git a/module/executiondatasync/optimistic_sync/execution_state_cache.go b/module/executiondatasync/optimistic_sync/execution_state_cache.go new file mode 100644 index 00000000000..58e2e1d4275 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_state_cache.go @@ -0,0 +1,16 @@ +package optimistic_sync + +import "github.com/onflow/flow-go/model/flow" + +// ExecutionStateCache provides access to execution state snapshots for querying data at specific ExecutionResults. +type ExecutionStateCache interface { + // Snapshot returns a view of the execution state as of the provided ExecutionResult. + // The returned Snapshot provides access to execution state data for the fork ending + // on the provided ExecutionResult which extends from the latest sealed result. + // The result may be sealed or unsealed. Only data for finalized blocks is available. + // + // Expected errors during normal operation: + // - storage.ErrNotFound - result is not available, not ready for querying, or does not descend from the latest sealed result. + // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) + Snapshot(executionResultID flow.Identifier) (Snapshot, error) +} diff --git a/module/executiondatasync/optimistic_sync/factories.go b/module/executiondatasync/optimistic_sync/factories.go new file mode 100644 index 00000000000..06bd673d014 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/factories.go @@ -0,0 +1,13 @@ +package optimistic_sync + +import "github.com/onflow/flow-go/model/flow" + +// CoreFactory is a factory object for creating new Core instances. +type CoreFactory interface { + NewCore(result *flow.ExecutionResult) Core +} + +// PipelineFactory is a factory object for creating new Pipeline instances. 
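// snapshotReadySketch is a hypothetical caller of ExecutionStateCache illustrating the
// documented error classification: in this context storage.ErrNotFound is benign (the result
// is simply not queryable yet, or does not descend from the latest sealed result), so the
// caller reports "not ready" instead of failing, while any other error remains an exception.
// The helper name is an assumption, and it presumes the errors and storage packages are imported.
func snapshotReadySketch(cache ExecutionStateCache, resultID flow.Identifier) (bool, error) {
	_, err := cache.Snapshot(resultID)
	if errors.Is(err, storage.ErrNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}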
+type PipelineFactory interface { + NewPipeline(result *flow.ExecutionResult, isSealed bool) Pipeline +} diff --git a/module/executiondatasync/optimistic_syncing/mock/core.go b/module/executiondatasync/optimistic_sync/mock/core.go similarity index 66% rename from module/executiondatasync/optimistic_syncing/mock/core.go rename to module/executiondatasync/optimistic_sync/mock/core.go index faf0976ee40..77cada5b287 100644 --- a/module/executiondatasync/optimistic_syncing/mock/core.go +++ b/module/executiondatasync/optimistic_sync/mock/core.go @@ -13,6 +13,24 @@ type Core struct { mock.Mock } +// Abandon provides a mock function with no fields +func (_m *Core) Abandon() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Abandon") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Download provides a mock function with given fields: ctx func (_m *Core) Download(ctx context.Context) error { ret := _m.Called(ctx) @@ -31,17 +49,17 @@ func (_m *Core) Download(ctx context.Context) error { return r0 } -// Index provides a mock function with given fields: ctx -func (_m *Core) Index(ctx context.Context) error { - ret := _m.Called(ctx) +// Index provides a mock function with no fields +func (_m *Core) Index() error { + ret := _m.Called() if len(ret) == 0 { panic("no return value specified for Index") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() } else { r0 = ret.Error(0) } @@ -49,17 +67,17 @@ func (_m *Core) Index(ctx context.Context) error { return r0 } -// Persist provides a mock function with given fields: ctx -func (_m *Core) Persist(ctx context.Context) error { - ret := _m.Called(ctx) +// Persist provides a mock function with no fields +func (_m *Core) Persist() error { + ret := _m.Called() if len(ret) == 0 { panic("no return value specified for Persist") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() } else { r0 = ret.Error(0) } diff --git a/module/executiondatasync/optimistic_sync/persisters/block.go b/module/executiondatasync/optimistic_sync/persisters/block.go new file mode 100644 index 00000000000..cfb617e858b --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/block.go @@ -0,0 +1,90 @@ +package persisters + +import ( + "fmt" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// BlockPersister handles persisting of execution data for all PersisterStore-s to database. +// Each BlockPersister instance is created for ONE specific block +type BlockPersister struct { + log zerolog.Logger + + persisterStores []stores.PersisterStore + protocolDB storage.DB + lockManager lockctx.Manager + executionResult *flow.ExecutionResult + header *flow.Header +} + +// NewBlockPersister creates a new block persister. +func NewBlockPersister( + log zerolog.Logger, + protocolDB storage.DB, + lockManager lockctx.Manager, + executionResult *flow.ExecutionResult, + header *flow.Header, + persisterStores []stores.PersisterStore, +) *BlockPersister { + log = log.With(). + Str("component", "block_persister"). 
+ Hex("block_id", logging.ID(executionResult.BlockID)).
+ Uint64("height", header.Height).
+ Logger()
+
+ persister := &BlockPersister{
+ log: log,
+ persisterStores: persisterStores,
+ protocolDB: protocolDB,
+ executionResult: executionResult,
+ header: header,
+ lockManager: lockManager,
+ }
+
+ persister.log.Info().
+ Int("batch_persisters_count", len(persisterStores)).
+ Msg("block persisters initialized")
+
+ return persister
+}
+
+// Persist saves the data from the provided persister stores and commits the updates to the database.
+// No errors are expected during normal operations
+func (p *BlockPersister) Persist() error {
+ p.log.Debug().Msg("started to persist execution data")
+ start := time.Now()
+
+ lctx := p.lockManager.NewContext()
+ err := lctx.AcquireLock(storage.LockInsertCollection)
+ if err != nil {
+ return fmt.Errorf("could not acquire lock for inserting light collections: %w", err)
+ }
+ defer lctx.Release()
+
+ err = p.protocolDB.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
+ for _, persister := range p.persisterStores {
+ if err := persister.Persist(lctx, batch); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ return fmt.Errorf("failed to commit batch: %w", err)
+ }
+
+ p.log.Debug().
+ Dur("duration_ms", time.Since(start)).
+ Msg("successfully persisted execution data")
+
+ return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/block_test.go b/module/executiondatasync/optimistic_sync/persisters/block_test.go
new file mode 100644
index 00000000000..65f35ae75f0
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/block_test.go
@@ -0,0 +1,300 @@
+package persisters
+
+import (
+ "testing"
+
+ "github.com/rs/zerolog"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores"
+ "github.com/onflow/flow-go/storage"
+ storagemock "github.com/onflow/flow-go/storage/mock"
+ "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+type PersisterSuite struct {
+ suite.Suite
+ persister *BlockPersister
+ inMemoryRegisters *unsynchronized.Registers
+ inMemoryEvents *unsynchronized.Events
+ inMemoryCollections *unsynchronized.Collections
+ inMemoryTransactions *unsynchronized.Transactions
+ inMemoryResults *unsynchronized.LightTransactionResults
+ inMemoryTxResultErrMsg *unsynchronized.TransactionResultErrorMessages
+ registers *storagemock.RegisterIndex
+ events *storagemock.Events
+ collections *storagemock.Collections
+ transactions *storagemock.Transactions
+ results *storagemock.LightTransactionResults
+ txResultErrMsg *storagemock.TransactionResultErrorMessages
+ latestPersistedSealedResult *storagemock.LatestPersistedSealedResult
+ database *storagemock.DB
+ executionResult *flow.ExecutionResult
+ header *flow.Header
+}
+
+func TestPersisterSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(PersisterSuite))
+}
+
+func (p *PersisterSuite) SetupTest() {
+ lockManager := storage.NewTestingLockManager()
+ t := p.T()
+
+ block := unittest.BlockFixture()
+ p.header = block.Header
+ p.executionResult = unittest.ExecutionResultFixture(unittest.WithBlock(&block))
+
+ p.inMemoryRegisters = unsynchronized.NewRegisters(p.header.Height)
+ p.inMemoryEvents = unsynchronized.NewEvents()
+ p.inMemoryTransactions = unsynchronized.NewTransactions()
+
p.inMemoryCollections = unsynchronized.NewCollections(p.inMemoryTransactions) + p.inMemoryResults = unsynchronized.NewLightTransactionResults() + p.inMemoryTxResultErrMsg = unsynchronized.NewTransactionResultErrorMessages() + + p.registers = storagemock.NewRegisterIndex(t) + p.events = storagemock.NewEvents(t) + p.collections = storagemock.NewCollections(t) + p.transactions = storagemock.NewTransactions(t) + p.results = storagemock.NewLightTransactionResults(t) + p.txResultErrMsg = storagemock.NewTransactionResultErrorMessages(t) + p.latestPersistedSealedResult = storagemock.NewLatestPersistedSealedResult(t) + + p.database = storagemock.NewDB(t) + p.database.On("WithReaderBatchWriter", mock.Anything).Return( + func(fn func(storage.ReaderBatchWriter) error) error { + return fn(storagemock.NewBatch(t)) + }, + ) + + p.persister = NewBlockPersister( + zerolog.Nop(), + p.database, + lockManager, + p.executionResult, + p.header, + []stores.PersisterStore{ + stores.NewEventsStore(p.inMemoryEvents, p.events, p.executionResult.BlockID), + stores.NewResultsStore(p.inMemoryResults, p.results, p.executionResult.BlockID), + stores.NewCollectionsStore(p.inMemoryCollections, p.collections, lockManager), + stores.NewTransactionsStore(p.inMemoryTransactions, p.transactions), + stores.NewTxResultErrMsgStore(p.inMemoryTxResultErrMsg, p.txResultErrMsg, p.executionResult.BlockID), + stores.NewLatestSealedResultStore(p.latestPersistedSealedResult, p.executionResult.ID(), p.header.Height), + }, + ) +} + +func (p *PersisterSuite) populateInMemoryStorages() { + regEntries := make(flow.RegisterEntries, 3) + for i := 0; i < 3; i++ { + regEntries[i] = unittest.RegisterEntryFixture() + } + err := p.inMemoryRegisters.Store(regEntries, p.header.Height) + p.Require().NoError(err) + + eventsList := unittest.EventsFixture(5) + err = p.inMemoryEvents.Store(p.executionResult.BlockID, []flow.EventsList{eventsList}) + p.Require().NoError(err) + + for i := 0; i < 2; i++ { + collection := unittest.CollectionFixture(2) + _, err := p.inMemoryCollections.Store(&collection) + p.Require().NoError(err) + + for _, tx := range collection.Transactions { + err := p.inMemoryTransactions.Store(tx) + p.Require().NoError(err) + } + } + + results := unittest.LightTransactionResultsFixture(4) + err = p.inMemoryResults.Store(p.executionResult.BlockID, results) + p.Require().NoError(err) + + txResultErrMsgs := make([]flow.TransactionResultErrorMessage, 2) + executorID := unittest.IdentifierFixture() + for i := 0; i < 2; i++ { + txResultErrMsgs[i] = flow.TransactionResultErrorMessage{ + TransactionID: unittest.IdentifierFixture(), + ErrorMessage: "expected test error", + Index: uint32(i), + ExecutorID: executorID, + } + } + err = p.inMemoryTxResultErrMsg.Store(p.executionResult.BlockID, txResultErrMsgs) + p.Require().NoError(err) +} + +func (p *PersisterSuite) TestPersister_PersistWithEmptyData() { + t := p.T() + + err := p.inMemoryEvents.Store(p.executionResult.BlockID, []flow.EventsList{}) + p.Require().NoError(err) + + err = p.inMemoryResults.Store(p.executionResult.BlockID, []flow.LightTransactionResult{}) + p.Require().NoError(err) + + err = p.inMemoryTxResultErrMsg.Store(p.executionResult.BlockID, []flow.TransactionResultErrorMessage{}) + p.Require().NoError(err) + + p.latestPersistedSealedResult.On("BatchSet", p.executionResult.ID(), p.header.Height, mock.Anything).Return(nil).Once() + + err = p.persister.Persist() + p.Require().NoError(err) + + // Verify other storages were not called since the data is empty + p.events.AssertNotCalled(t, 
"BatchStore") + p.results.AssertNotCalled(t, "BatchStore") + p.collections.AssertNotCalled(t, "BatchStoreAndIndexByTransaction") + p.transactions.AssertNotCalled(t, "BatchStore") + p.txResultErrMsg.AssertNotCalled(t, "BatchStore") +} + +func (p *PersisterSuite) TestPersister_PersistWithData() { + p.populateInMemoryStorages() + + storedEvents := make([]flow.EventsList, 0) + storedCollections := make([]flow.LightCollection, 0) + storedTransactions := make([]flow.TransactionBody, 0) + storedResults := make([]flow.LightTransactionResult, 0) + storedTxResultErrMsgs := make([]flow.TransactionResultErrorMessage, 0) + + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + se, ok := args.Get(1).([]flow.EventsList) + p.Require().True(ok) + storedEvents = se + }).Return(nil) + + p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + sr, ok := args.Get(1).([]flow.LightTransactionResult) + p.Require().True(ok) + storedResults = sr + }).Return(nil) + + numberOfCollections := len(p.inMemoryCollections.Data()) + p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + collection, ok := args.Get(1).(*flow.Collection) + p.Require().True(ok) + light := collection.Light() + storedCollections = append(storedCollections, light) + }).Return(flow.LightCollection{}, nil).Times(numberOfCollections) + + p.transactions.On("BatchStore", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + transaction, ok := args.Get(0).(*flow.TransactionBody) + p.Require().True(ok) + storedTransactions = append(storedTransactions, *transaction) + }).Return(nil).Times(len(p.inMemoryTransactions.Data())) + + p.txResultErrMsg.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + terrm, ok := args.Get(1).([]flow.TransactionResultErrorMessage) + p.Require().True(ok) + storedTxResultErrMsgs = terrm + }).Return(nil) + + p.latestPersistedSealedResult.On("BatchSet", p.executionResult.ID(), p.header.Height, mock.Anything).Return(nil).Once() + + err := p.persister.Persist() + p.Require().NoError(err) + + // Convert full collections to light collections for comparison + expectedLightCollections := make([]flow.LightCollection, 0, len(p.inMemoryCollections.Data())) + for _, collection := range p.inMemoryCollections.Data() { + expectedLightCollections = append(expectedLightCollections, collection.Light()) + } + + // Verify expected data was stored + p.Assert().ElementsMatch([]flow.EventsList{p.inMemoryEvents.Data()}, storedEvents) + p.Assert().ElementsMatch(p.inMemoryResults.Data(), storedResults) + p.Assert().ElementsMatch(expectedLightCollections, storedCollections) + p.Assert().ElementsMatch(p.inMemoryTransactions.Data(), storedTransactions) + p.Assert().ElementsMatch(p.inMemoryTxResultErrMsg.Data(), storedTxResultErrMsgs) +} + +func (p *PersisterSuite) TestPersister_PersistErrorHandling() { + tests := []struct { + name string + setupMocks func() + expectedError string + }{ + { + name: "EventsBatchStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() + }, + expectedError: "could not add events to batch", + }, + { + name: "ResultsBatchStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + 
p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() + }, + expectedError: "could not add transaction results to batch", + }, + { + name: "CollectionsStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(flow.LightCollection{}, assert.AnError).Once() + }, + expectedError: "could not add light collections to batch", + }, + { + name: "TransactionsStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + numberOfCollections := len(p.inMemoryCollections.Data()) + p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(flow.LightCollection{}, nil).Times(numberOfCollections) + p.transactions.On("BatchStore", mock.Anything, mock.Anything).Return(assert.AnError).Once() + }, + expectedError: "could not add transactions to batch", + }, + { + name: "TxResultErrMsgStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + numberOfCollections := len(p.inMemoryCollections.Data()) + p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(flow.LightCollection{}, nil).Times(numberOfCollections) + numberOfTransactions := len(p.inMemoryTransactions.Data()) + p.transactions.On("BatchStore", mock.Anything, mock.Anything).Return(nil).Times(numberOfTransactions) + p.txResultErrMsg.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(assert.AnError).Once() + }, + expectedError: "could not add transaction result error messages to batch", + }, + { + name: "LatestPersistedSealedResultStoreError", + setupMocks: func() { + p.events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.results.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + numberOfCollections := len(p.inMemoryCollections.Data()) + p.collections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(flow.LightCollection{}, nil).Times(numberOfCollections) + numberOfTransactions := len(p.inMemoryTransactions.Data()) + p.transactions.On("BatchStore", mock.Anything, mock.Anything).Return(nil).Times(numberOfTransactions) + p.txResultErrMsg.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(nil).Once() + p.latestPersistedSealedResult.On("BatchSet", p.executionResult.ID(), p.header.Height, mock.Anything).Return(assert.AnError).Once() + }, + expectedError: "could not persist latest sealed result", + }, + } + + p.populateInMemoryStorages() + + for _, test := range tests { + p.Run(test.name, func() { + test.setupMocks() + + err := p.persister.Persist() + p.Require().Error(err) + + p.Assert().Contains(err.Error(), test.expectedError) + }) + } +} diff --git a/module/executiondatasync/optimistic_sync/persisters/registers.go 
b/module/executiondatasync/optimistic_sync/persisters/registers.go new file mode 100644 index 00000000000..4f9f0ba163a --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/registers.go @@ -0,0 +1,42 @@ +package persisters + +import ( + "fmt" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +// RegistersPersister handles registers +type RegistersPersister struct { + inMemoryRegisters *unsynchronized.Registers + permanentRegisters storage.RegisterIndex + height uint64 +} + +func NewRegistersPersister( + inMemoryRegisters *unsynchronized.Registers, + permanentRegisters storage.RegisterIndex, + height uint64, +) *RegistersPersister { + return &RegistersPersister{ + inMemoryRegisters: inMemoryRegisters, + permanentRegisters: permanentRegisters, + height: height, + } +} + +// Persist persists registers +// No errors are expected during normal operations +func (r *RegistersPersister) Persist() error { + registerData, err := r.inMemoryRegisters.Data(r.height) + if err != nil { + return fmt.Errorf("could not get data from registers: %w", err) + } + + if err := r.permanentRegisters.Store(registerData, r.height); err != nil { + return fmt.Errorf("could not persist registers: %w", err) + } + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/persisters/registers_test.go b/module/executiondatasync/optimistic_sync/persisters/registers_test.go new file mode 100644 index 00000000000..4bad0b8566d --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/registers_test.go @@ -0,0 +1,126 @@ +package persisters + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" + "github.com/onflow/flow-go/utils/unittest" +) + +// RegistersPersisterSuite tests the RegistersPersister separately since it uses a different database +type RegistersPersisterSuite struct { + suite.Suite + persister *RegistersPersister + inMemoryRegisters *unsynchronized.Registers + registers *storagemock.RegisterIndex + header *flow.Header +} + +func TestRegistersPersisterSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(RegistersPersisterSuite)) +} + +func (r *RegistersPersisterSuite) SetupTest() { + block := unittest.BlockFixture() + r.header = block.Header + + r.inMemoryRegisters = unsynchronized.NewRegisters(r.header.Height) + r.registers = storagemock.NewRegisterIndex(r.T()) + + r.persister = NewRegistersPersister( + r.inMemoryRegisters, + r.registers, + r.header.Height, + ) +} + +func (r *RegistersPersisterSuite) TestRegistersPersister_PersistWithEmptyData() { + // Registers must be stored for every height, even if empty + storedRegisters := make([]flow.RegisterEntry, 0) + r.registers.On("Store", mock.Anything, r.header.Height).Run(func(args mock.Arguments) { + sr, ok := args.Get(0).(flow.RegisterEntries) + r.Require().True(ok) + storedRegisters = sr + }).Return(nil).Once() + + err := r.persister.Persist() + r.Require().NoError(err) + + // Verify empty registers were stored + r.Assert().Empty(storedRegisters) + r.registers.AssertExpectations(r.T()) +} + +func (r *RegistersPersisterSuite) TestRegistersPersister_PersistWithData() { + // Populate register data + regEntries := make(flow.RegisterEntries, 3) + for i := 0; i < 3; i++ { + regEntries[i] = 
unittest.RegisterEntryFixture() + } + err := r.inMemoryRegisters.Store(regEntries, r.header.Height) + r.Require().NoError(err) + + // Setup mock to capture stored data + storedRegisters := make([]flow.RegisterEntry, 0) + r.registers.On("Store", mock.Anything, r.header.Height).Run(func(args mock.Arguments) { + sr, ok := args.Get(0).(flow.RegisterEntries) + r.Require().True(ok) + storedRegisters = sr + }).Return(nil).Once() + + err = r.persister.Persist() + r.Require().NoError(err) + + // Verify the correct data was stored + expectedRegisters, err := r.inMemoryRegisters.Data(r.header.Height) + r.Require().NoError(err) + r.Assert().ElementsMatch(expectedRegisters, storedRegisters) + r.registers.AssertExpectations(r.T()) +} + +func (r *RegistersPersisterSuite) TestRegistersPersister_ErrorHandling() { + tests := []struct { + name string + setupMocks func() + expectedError string + }{ + { + name: "RegistersStoreError", + setupMocks: func() { + r.registers.On("Store", mock.Anything, r.header.Height).Return(assert.AnError).Once() + }, + expectedError: "could not persist registers", + }, + { + name: "RegistersDataError", + setupMocks: func() { + // Create a persisters with wrong height to trigger data error + wrongPersister := NewRegistersPersister( + r.inMemoryRegisters, + r.registers, + r.header.Height+1, // Wrong height + ) + r.persister = wrongPersister + }, + expectedError: "could not get data from registers", + }, + } + + for _, test := range tests { + r.Run(test.name, func() { + test.setupMocks() + + err := r.persister.Persist() + r.Require().Error(err) + + r.Assert().Contains(err.Error(), test.expectedError) + }) + } +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/events.go b/module/executiondatasync/optimistic_sync/persisters/stores/events.go new file mode 100644 index 00000000000..629adcc2a6f --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/events.go @@ -0,0 +1,49 @@ +package stores + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +var _ PersisterStore = (*EventsStore)(nil) + +// EventsStore handles persisting events +type EventsStore struct { + inMemoryEvents *unsynchronized.Events + persistedEvents storage.Events + blockID flow.Identifier +} + +func NewEventsStore( + inMemoryEvents *unsynchronized.Events, + persistedEvents storage.Events, + blockID flow.Identifier, +) *EventsStore { + return &EventsStore{ + inMemoryEvents: inMemoryEvents, + persistedEvents: persistedEvents, + blockID: blockID, + } +} + +// Persist adds events to the batch. 
+// No errors are expected during normal operations
+func (e *EventsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error {
+ eventsList, err := e.inMemoryEvents.ByBlockID(e.blockID)
+ if err != nil {
+ return fmt.Errorf("could not get events: %w", err)
+ }
+
+ if len(eventsList) > 0 {
+ if err := e.persistedEvents.BatchStore(e.blockID, []flow.EventsList{eventsList}, batch); err != nil {
+ return fmt.Errorf("could not add events to batch: %w", err)
+ }
+ }
+
+ return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go b/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go
new file mode 100644
index 00000000000..ce0251003ba
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go
@@ -0,0 +1,40 @@
+package stores
+
+import (
+ "fmt"
+
+ "github.com/jordanschalm/lockctx"
+
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/storage"
+)
+
+var _ PersisterStore = (*LatestSealedResultStore)(nil)
+
+// LatestSealedResultStore handles persisting the latest sealed execution result
+type LatestSealedResultStore struct {
+ latestPersistedSealedResult storage.LatestPersistedSealedResult
+ height uint64
+ executionResultID flow.Identifier
+}
+
+func NewLatestSealedResultStore(
+ latestPersistedSealedResult storage.LatestPersistedSealedResult,
+ executionResultID flow.Identifier,
+ height uint64,
+) *LatestSealedResultStore {
+ return &LatestSealedResultStore{
+ latestPersistedSealedResult: latestPersistedSealedResult,
+ height: height,
+ executionResultID: executionResultID,
+ }
+}
+
+// Persist adds the latest sealed result to the batch.
+// No errors are expected during normal operations
+func (t *LatestSealedResultStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error {
+ if err := t.latestPersistedSealedResult.BatchSet(t.executionResultID, t.height, batch); err != nil {
+ return fmt.Errorf("could not persist latest sealed result: %w", err)
+ }
+ return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go b/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go
new file mode 100644
index 00000000000..12d4df45677
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go
@@ -0,0 +1,41 @@
+package stores
+
+import (
+ "fmt"
+
+ "github.com/jordanschalm/lockctx"
+
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized"
+)
+
+var _ PersisterStore = (*LightCollectionsStore)(nil)
+
+// LightCollectionsStore handles persisting light collections
+type LightCollectionsStore struct {
+ inMemoryCollections *unsynchronized.Collections
+ persistedCollections storage.Collections
+}
+
+func NewCollectionsStore(
+ inMemoryCollections *unsynchronized.Collections,
+ persistedCollections storage.Collections,
+ lockManager storage.LockManager,
+) *LightCollectionsStore {
+ return &LightCollectionsStore{
+ inMemoryCollections: inMemoryCollections,
+ persistedCollections: persistedCollections,
+ }
+}
+
+// Persist adds light collections to the batch.
+// No errors are expected during normal operations +func (c *LightCollectionsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error { + for _, collection := range c.inMemoryCollections.Data() { + if _, err := c.persistedCollections.BatchStoreAndIndexByTransaction(lctx, &collection, batch); err != nil { + return fmt.Errorf("could not add light collections to batch: %w", err) + } + } + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go b/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go new file mode 100644 index 00000000000..436b93e3f35 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go @@ -0,0 +1,14 @@ +package stores + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/storage" +) + +// PersisterStore is the interface to handle persisting of a data type to persisted storage using batch operation. +type PersisterStore interface { + // Persist adds data to the batch for later commitment. + // No errors are expected during normal operations + Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/results.go b/module/executiondatasync/optimistic_sync/persisters/stores/results.go new file mode 100644 index 00000000000..bcc1116462f --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/results.go @@ -0,0 +1,49 @@ +package stores + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +var _ PersisterStore = (*ResultsStore)(nil) + +// ResultsStore handles persisting transaction results +type ResultsStore struct { + inMemoryResults *unsynchronized.LightTransactionResults + persistedResults storage.LightTransactionResults + blockID flow.Identifier +} + +func NewResultsStore( + inMemoryResults *unsynchronized.LightTransactionResults, + persistedResults storage.LightTransactionResults, + blockID flow.Identifier, +) *ResultsStore { + return &ResultsStore{ + inMemoryResults: inMemoryResults, + persistedResults: persistedResults, + blockID: blockID, + } +} + +// Persist adds results to the batch. 
+// No errors are expected during normal operations +func (r *ResultsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error { + results, err := r.inMemoryResults.ByBlockID(r.blockID) + if err != nil { + return fmt.Errorf("could not get results: %w", err) + } + + if len(results) > 0 { + if err := r.persistedResults.BatchStore(r.blockID, results, batch); err != nil { + return fmt.Errorf("could not add transaction results to batch: %w", err) + } + } + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go b/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go new file mode 100644 index 00000000000..a589c772592 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go @@ -0,0 +1,49 @@ +package stores + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +var _ PersisterStore = (*TxResultErrMsgStore)(nil) + +// TxResultErrMsgStore handles persisting transaction result error messages +type TxResultErrMsgStore struct { + inMemoryTxResultErrMsg *unsynchronized.TransactionResultErrorMessages + persistedTxResultErrMsg storage.TransactionResultErrorMessages + blockID flow.Identifier +} + +func NewTxResultErrMsgStore( + inMemoryTxResultErrMsg *unsynchronized.TransactionResultErrorMessages, + persistedTxResultErrMsg storage.TransactionResultErrorMessages, + blockID flow.Identifier, +) *TxResultErrMsgStore { + return &TxResultErrMsgStore{ + inMemoryTxResultErrMsg: inMemoryTxResultErrMsg, + persistedTxResultErrMsg: persistedTxResultErrMsg, + blockID: blockID, + } +} + +// Persist adds transaction result error messages to the batch. +// No errors are expected during normal operations +func (t *TxResultErrMsgStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error { + txResultErrMsgs, err := t.inMemoryTxResultErrMsg.ByBlockID(t.blockID) + if err != nil { + return fmt.Errorf("could not get transaction result error messages: %w", err) + } + + if len(txResultErrMsgs) > 0 { + if err := t.persistedTxResultErrMsg.BatchStore(t.blockID, txResultErrMsgs, batch); err != nil { + return fmt.Errorf("could not add transaction result error messages to batch: %w", err) + } + } + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/transactions.go b/module/executiondatasync/optimistic_sync/persisters/stores/transactions.go new file mode 100644 index 00000000000..2740e625f15 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/transactions.go @@ -0,0 +1,40 @@ +package stores + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" +) + +var _ PersisterStore = (*TransactionsStore)(nil) + +// TransactionsStore handles persisting transactions +type TransactionsStore struct { + inMemoryTransactions *unsynchronized.Transactions + persistedTransactions storage.Transactions +} + +func NewTransactionsStore( + inMemoryTransactions *unsynchronized.Transactions, + persistedTransactions storage.Transactions, +) *TransactionsStore { + return &TransactionsStore{ + inMemoryTransactions: inMemoryTransactions, + persistedTransactions: persistedTransactions, + } +} + +// Persist adds transactions to the batch. 
+// No errors are expected during normal operations
+func (t *TransactionsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error {
+ for _, transaction := range t.inMemoryTransactions.Data() {
+ if err := t.persistedTransactions.BatchStore(&transaction, batch); err != nil {
+ return fmt.Errorf("could not add transactions to batch: %w", err)
+ }
+ }
+
+ return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline.go b/module/executiondatasync/optimistic_sync/pipeline.go
new file mode 100644
index 00000000000..1d93e349340
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline.go
@@ -0,0 +1,450 @@
+package optimistic_sync
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/gammazero/workerpool"
+ "github.com/rs/zerolog"
+ "go.uber.org/atomic"
+
+ "github.com/onflow/flow-go/engine"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/irrecoverable"
+)
+
+var (
+ // ErrInvalidTransition is returned when a state transition is invalid.
+ ErrInvalidTransition = errors.New("invalid state transition")
+)
+
+// PipelineStateProvider is an interface that provides a pipeline's state.
+type PipelineStateProvider interface {
+ // GetState returns the current state of the pipeline.
+ GetState() State
+}
+
+// PipelineStateConsumer is a receiver of pipeline state updates.
+// PipelineStateConsumer implementations must be
+// - NON-BLOCKING and consume the state updates without noteworthy delay
+type PipelineStateConsumer interface {
+ // OnStateUpdated is called when a pipeline's state changes to notify the receiver of the new state.
+ // This method will be called in the same goroutine that runs the pipeline, so it must not block.
+ OnStateUpdated(newState State)
+}
+
+// Pipeline represents a pipelined processing state machine for a single ExecutionResult.
+// The state machine is initialized in the Pending state.
+//
+// The state machine is designed to be run in a single goroutine. The Run method must only be called once.
+type Pipeline interface {
+ PipelineStateProvider
+
+ // Run starts the pipeline processing and blocks until completion or context cancellation.
+ // CAUTION: not concurrency safe! Run must only be called once.
+ //
+ // Expected Errors:
+ // - context.Canceled: when the context is canceled
+ // - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+ Run(ctx context.Context, core Core, parentState State) error
+
+ // SetSealed marks the pipeline's result as sealed, which enables the pipeline to leave StateWaitingPersist
+ // and persist its data once the parent has completed.
+ SetSealed()
+
+ // OnParentStateUpdated updates the pipeline's parent's state.
+ OnParentStateUpdated(parentState State)
+
+ // Abandon marks the pipeline as abandoned.
+ Abandon()
+}
+
+var _ Pipeline = (*PipelineImpl)(nil)
+
+// worker implements a single worker goroutine that processes tasks submitted to it.
+// It supports submission of context-based tasks that return an error.
+// Each error that occurs during task execution is sent to a dedicated error channel.
+// The primary purpose of the worker is to handle tasks in a non-blocking manner, while still allowing the parent thread
+// to observe and handle errors that occur during task execution.
+type worker struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ pool *workerpool.WorkerPool
+ errChan chan error
+}
+
+// newWorker creates a single worker.
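
The PipelineStateConsumer contract above requires implementations to return without blocking. As an illustrative sketch only (not part of this change set), one way to satisfy that is to cache the most recent state and signal readers through a capacity-1 channel; this assumes the package's `State`/`StatePending` values and the `go.uber.org/atomic` package imported above, and the type/method names here are hypothetical:

```go
// latestStateConsumer is a hypothetical, non-blocking PipelineStateConsumer.
// It overwrites the last observed state and performs a non-blocking send so a
// reader can wake up and fetch the latest value.
type latestStateConsumer struct {
	state  *atomic.Int32
	notify chan struct{}
}

func newLatestStateConsumer() *latestStateConsumer {
	return &latestStateConsumer{
		state:  atomic.NewInt32(int32(StatePending)),
		notify: make(chan struct{}, 1),
	}
}

// OnStateUpdated never blocks: the send is dropped if a signal is already pending,
// and the reader always observes the most recent state via Latest().
func (c *latestStateConsumer) OnStateUpdated(newState State) {
	c.state.Store(int32(newState))
	select {
	case c.notify <- struct{}{}:
	default:
	}
}

// Latest returns the most recently reported state.
func (c *latestStateConsumer) Latest() State {
	return State(c.state.Load())
}
```
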
+func newWorker() *worker { + ctx, cancel := context.WithCancel(context.Background()) + return &worker{ + ctx: ctx, + cancel: cancel, + pool: workerpool.New(1), + errChan: make(chan error, 1), + } +} + +// Submit submits a new task for processing, each error will be propagated in a specific channel. +// Might block the worker if there is no one reading from the error channel and errors are happening. +func (w *worker) Submit(task func(ctx context.Context) error) { + w.pool.Submit(func() { + err := task(w.ctx) + if err != nil && !errors.Is(err, context.Canceled) { + w.errChan <- err + } + }) +} + +// ErrChan returns the channel where errors are delivered from executed tasks. +func (w *worker) ErrChan() <-chan error { + return w.errChan +} + +// StopWait stops the worker pool and waits for all queued tasks to complete. +// No additional tasks may be submitted, but all pending tasks are executed by workers before this function returns. +// This function is blocking and guarantees that any error that occurred during the execution of tasks will be delivered +// to the caller as a return value of this function. +// Any error that was delivered during execution will be delivered to the caller. +func (w *worker) StopWait() error { + w.cancel() + w.pool.StopWait() + + defer close(w.errChan) + select { + case err := <-w.errChan: + return err + default: + return nil + } +} + +// PipelineImpl implements the Pipeline interface +type PipelineImpl struct { + log zerolog.Logger + stateConsumer PipelineStateConsumer + stateChangedNotifier engine.Notifier + core Core + worker *worker + + // The following fields are accessed externally. they are stored using atomics to avoid + // blocking the caller. + state *atomic.Int32 + parentStateCache *atomic.Int32 + isSealed *atomic.Bool + isAbandoned *atomic.Bool + isIndexed *atomic.Bool +} + +// NewPipeline creates a new processing pipeline. +// The pipeline is initialized in the Pending state. +func NewPipeline( + log zerolog.Logger, + executionResult *flow.ExecutionResult, + isSealed bool, + stateReceiver PipelineStateConsumer, +) *PipelineImpl { + log = log.With(). + Str("component", "pipeline"). + Str("execution_result_id", executionResult.ExecutionDataID.String()). + Str("block_id", executionResult.BlockID.String()). + Logger() + + return &PipelineImpl{ + log: log, + stateConsumer: stateReceiver, + worker: newWorker(), + state: atomic.NewInt32(int32(StatePending)), + parentStateCache: atomic.NewInt32(int32(StatePending)), + isSealed: atomic.NewBool(isSealed), + isAbandoned: atomic.NewBool(false), + isIndexed: atomic.NewBool(false), + stateChangedNotifier: engine.NewNotifier(), + } +} + +// Run starts the pipeline processing and blocks until completion or context cancellation. +// CAUTION: not concurrency safe! Run must only be called once. +// +// Expected Errors: +// - context.Canceled: when the context is canceled +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (p *PipelineImpl) Run(ctx context.Context, core Core, parentState State) error { + if p.core != nil { + return irrecoverable.NewExceptionf("pipeline has been already started, it is not designed to be run again") + } + p.core = core + p.parentStateCache.Store(int32(parentState)) + // run the main event loop by calling p.loop. any error returned from it needs to be propagated to the caller. 
+ // IMPORTANT: after the main loop has exited we need to ensure that the worker goroutine has also finished,
+ // because we need to ensure that it can report any error that has happened during the execution of a detached operation.
+ // By calling StopWait we ensure that the worker has stopped, which also guarantees that any error has been delivered to the
+ // error channel and returned as the result of StopWait. Without waiting for the worker to stop, we might skip some errors
+ // since the worker didn't have a chance to report them yet, and we have already returned from the Run method.
+ return errors.Join(p.loop(ctx), p.worker.StopWait())
+}
+
+// loop implements the main event loop for the state machine. It reacts to different events and performs operations upon
+// entering or leaving a state.
+// loop blocks until one of the following happens, whichever occurs first:
+// 1. the parent context signals that it is no longer valid.
+// 2. the worker thread has received an error. It's not safe to continue execution anymore, so this error needs to be propagated
+// to the caller.
+// 3. the pipeline has successfully entered a terminal state.
+// The pipeline won't and shouldn't perform any state transitions after returning from this function.
+// Expected Errors:
+// - context.Canceled: when the context is canceled
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) loop(ctx context.Context) error {
+ // try to start processing in case we are able to.
+ p.stateChangedNotifier.Notify()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-p.worker.ErrChan():
+ return err
+ case <-p.stateChangedNotifier.Channel():
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ // if the parent was abandoned there is no point in continuing; go to the abandoned state and perform cleanup logic.
+ if p.checkAbandoned() {
+ if err := p.transitionTo(StateAbandoned); err != nil {
+ return fmt.Errorf("could not transition to abandoned state: %w", err)
+ }
+ }
+
+ currentState := p.GetState()
+ switch currentState {
+ case StatePending:
+ if err := p.onStartProcessing(); err != nil {
+ return fmt.Errorf("could not process pending state: %w", err)
+ }
+ case StateProcessing:
+ if err := p.onProcessing(); err != nil {
+ return fmt.Errorf("could not process processing state: %w", err)
+ }
+ case StateWaitingPersist:
+ if err := p.onPersistChanges(); err != nil {
+ return fmt.Errorf("could not process waiting persist state: %w", err)
+ }
+ case StateAbandoned:
+ if err := p.core.Abandon(); err != nil {
+ return fmt.Errorf("could not process abandoned state: %w", err)
+ }
+ return nil
+ case StateComplete:
+ return nil // terminate
+ default:
+ return fmt.Errorf("invalid pipeline state: %s", currentState)
+ }
+ }
+ }
+}
+
+// onStartProcessing performs the initial state transitions depending on the parent state:
+// - Pending -> Processing
+// - Pending -> Abandoned
+// No errors are expected during normal operations.
+func (p *PipelineImpl) onStartProcessing() error {
+ switch p.parentState() {
+ case StateProcessing, StateWaitingPersist, StateComplete:
+ err := p.transitionTo(StateProcessing)
+ if err != nil {
+ return err
+ }
+ p.worker.Submit(p.performDownload)
+ case StatePending:
+ return nil
+ case StateAbandoned:
+ return p.transitionTo(StateAbandoned)
+ default:
+ // it's unexpected for the parent to be in any other state.
this most likely indicates there's a bug + return fmt.Errorf("unexpected parent state: %s", p.parentState()) + } + return nil +} + +// onProcessing performs the state transitions when the pipeline is in the Processing state. +// When data has been successfully indexed, we can transition to StateWaitingPersist. +// No errors are expected during normal operations. +func (p *PipelineImpl) onProcessing() error { + if p.isIndexed.Load() { + return p.transitionTo(StateWaitingPersist) + } + return nil +} + +// onPersistChanges performs the state transitions when the pipeline is in the WaitingPersist state. +// When the execution result has been sealed and the parent has already transitioned to StateComplete then +// we can persist the data and transition to StateComplete. +// No errors are expected during normal operations. +func (p *PipelineImpl) onPersistChanges() error { + if p.isSealed.Load() && p.parentState() == StateComplete { + if err := p.core.Persist(); err != nil { + return fmt.Errorf("could not persist pending changes: %w", err) + } + return p.transitionTo(StateComplete) + } else { + return nil + } +} + +// checkAbandoned returns true if the pipeline or its parent are abandoned. +func (p *PipelineImpl) checkAbandoned() bool { + if p.isAbandoned.Load() { + return true + } + if p.parentState() == StateAbandoned { + return true + } + return p.GetState() == StateAbandoned +} + +// GetState returns the current state of the pipeline. +func (p *PipelineImpl) GetState() State { + return State(p.state.Load()) +} + +// parentState returns the last cached parent state of the pipeline. +func (p *PipelineImpl) parentState() State { + return State(p.parentStateCache.Load()) +} + +// SetSealed marks the execution result as sealed. +// This will cause the pipeline to eventually transition to the StateComplete state when the parent finishes processing. +func (p *PipelineImpl) SetSealed() { + // Note: do not use a mutex here to avoid blocking the results forest. + if p.isSealed.CompareAndSwap(false, true) { + p.stateChangedNotifier.Notify() + } +} + +// OnParentStateUpdated updates the pipeline's state based on the provided parent state. +// If the parent state has changed, it will notify the state consumer and trigger a state change notification. +func (p *PipelineImpl) OnParentStateUpdated(parentState State) { + oldState := p.parentStateCache.Load() + if p.parentStateCache.CompareAndSwap(oldState, int32(parentState)) { + p.stateChangedNotifier.Notify() + } +} + +// Abandon marks the pipeline as abandoned +// This will cause the pipeline to eventually transition to the Abandoned state and halt processing +func (p *PipelineImpl) Abandon() { + if p.isAbandoned.CompareAndSwap(false, true) { + p.stateChangedNotifier.Notify() + } +} + +// performDownload performs the processing step of the pipeline by downloading and indexing data. +// It uses an atomic flag to indicate whether the operation has been completed successfully which +// informs the state machine that eventually it can transition to the next state. 
+// Expected Errors: +// - context.Canceled: when the context is canceled +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (p *PipelineImpl) performDownload(ctx context.Context) error { + if err := p.core.Download(ctx); err != nil { + return fmt.Errorf("could not perform download: %w", err) + } + if err := p.core.Index(); err != nil { + return fmt.Errorf("could not perform indexing: %w", err) + } + if p.isIndexed.CompareAndSwap(false, true) { + p.stateChangedNotifier.Notify() + } + return nil +} + +// transitionTo transitions the pipeline to the given state and broadcasts +// the state change to children pipelines. +// +// Expected Errors: +// - ErrInvalidTransition: when the transition is invalid +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (p *PipelineImpl) transitionTo(newState State) error { + hasChange, err := p.setState(newState) + if err != nil { + return err + } + + if hasChange { + // send notification for all state changes. we require that implementations of [PipelineStateConsumer] + // are non-blocking and consume the state updates without noteworthy delay. + p.stateConsumer.OnStateUpdated(newState) + p.stateChangedNotifier.Notify() + } + + return nil +} + +// setState sets the state of the pipeline and logs the transition. +// Returns true if the state was changed, false otherwise. +// +// Expected Errors: +// - ErrInvalidTransition: when the state transition is invalid +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (p *PipelineImpl) setState(newState State) (bool, error) { + currentState := p.GetState() + + // transitioning to the same state is a no-op + if currentState == newState { + return false, nil + } + + if err := p.validateTransition(currentState, newState); err != nil { + return false, fmt.Errorf("failed to transition from %s to %s: %w", currentState, newState, err) + } + + if !p.state.CompareAndSwap(int32(currentState), int32(newState)) { + // Note: this should never happen since state is only updated within the Run goroutine. + return false, fmt.Errorf("failed to transition from %s to %s", currentState, newState) + } + + p.log.Debug(). + Str("old_state", currentState.String()). + Str("new_state", newState.String()). + Msg("pipeline state transition") + + return true, nil +} + +// validateTransition validates the transition from the current state to the new state. +// +// Expected Errors: +// - ErrInvalidTransition: when the transition is invalid +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +func (p *PipelineImpl) validateTransition(currentState State, newState State) error { + switch newState { + case StateProcessing: + if currentState == StatePending { + return nil + } + case StateWaitingPersist: + if currentState == StateProcessing { + return nil + } + case StateComplete: + if currentState == StateWaitingPersist { + return nil + } + case StateAbandoned: + // Note: it does not make sense to transition to abandoned from persisting or completed since to be in either state: + // 1. the parent must be completed + // 2. 
the pipeline's result must be sealed + // At that point, there are no conditions that would cause the pipeline be abandoned + switch currentState { + case StatePending, StateProcessing, StateWaitingPersist: + return nil + } + + default: + return fmt.Errorf("invalid transition to state: %s", newState) + } + + return ErrInvalidTransition +} diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go new file mode 100644 index 00000000000..1a93fbcbb05 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go @@ -0,0 +1,585 @@ +package optimistic_sync + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + txerrmsgsmock "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages/mock" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + reqestermock "github.com/onflow/flow-go/module/state_synchronization/requester/mock" + "github.com/onflow/flow-go/storage" + bstorage "github.com/onflow/flow-go/storage/badger" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + pebbleStorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +type PipelineFunctionalSuite struct { + suite.Suite + logger zerolog.Logger + execDataRequester *reqestermock.ExecutionDataRequester + txResultErrMsgsRequester *txerrmsgsmock.Requester + txResultErrMsgsRequestTimeout time.Duration + tmpDir string + bdb *badger.DB + pdb *pebble.DB + db storage.DB + lockManager lockctx.Manager + persistentRegisters *pebbleStorage.Registers + persistentEvents storage.Events + persistentCollections *store.Collections + persistentTransactions *store.Transactions + persistentResults *store.LightTransactionResults + persistentTxResultErrMsg *store.TransactionResultErrorMessages + consumerProgress storage.ConsumerProgress + headers *store.Headers + results *store.ExecutionResults + persistentLatestSealedResult *store.LatestPersistedSealedResult + core *CoreImpl + block *flow.Block + executionResult *flow.ExecutionResult + metrics module.CacheMetrics + config PipelineConfig + expectedExecutionData *execution_data.BlockExecutionData + expectedTxResultErrMsgs []flow.TransactionResultErrorMessage +} + +func TestPipelineFunctionalSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(PipelineFunctionalSuite)) +} + +// SetupTest initializes the test environment for each test case. +// It creates temporary directories, initializes database connections, +// sets up storage backends, creates test fixtures, and initializes +// the core implementation with all required dependencies. 
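
The functional tests below exercise the full lifecycle end to end. As a compressed, illustrative sketch only (not part of this diff), a caller is expected to drive a single pipeline roughly as follows; the variables `log` (zerolog.Logger), `executionResult`, `core` (a Core implementation), and `consumer` (a non-blocking PipelineStateConsumer) are assumed to exist:

```go
// Sketch: run one pipeline whose parent has already reached StateComplete.
pipeline := NewPipeline(log, executionResult, false /* not yet sealed */, consumer)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

errCh := make(chan error, 1)
go func() {
	// Run blocks until the pipeline reaches a terminal state or the context is canceled.
	errCh <- pipeline.Run(ctx, core, StateComplete)
}()

// Once consensus seals the result, allow the StateWaitingPersist -> StateComplete transition.
pipeline.SetSealed()

if err := <-errCh; err != nil && !errors.Is(err, context.Canceled) {
	// any other error is treated as an exception: the pipeline cannot safely continue
	log.Fatal().Err(err).Msg("pipeline failed")
}
```

Calling Abandon(), or propagating an abandoned parent via OnParentStateUpdated, would instead drive the pipeline to StateAbandoned, which several of the tests below assert.
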
+func (p *PipelineFunctionalSuite) SetupTest() { + t := p.T() + p.lockManager = storage.NewTestingLockManager() + + p.tmpDir = unittest.TempDir(t) + p.logger = zerolog.Nop() + p.metrics = metrics.NewNoopCollector() + p.bdb = unittest.BadgerDB(t, p.tmpDir) + p.db = badgerimpl.ToDB(p.bdb) + + rootBlock := unittest.BlockHeaderFixture() + sealedBlock := unittest.BlockWithParentFixture(rootBlock) + sealedExecutionResult := unittest.ExecutionResultFixture(unittest.WithBlock(sealedBlock)) + + // Create real storages + var err error + p.pdb = pebbleStorage.NewBootstrappedRegistersWithPathForTest(t, p.tmpDir, rootBlock.Height, sealedBlock.Header.Height) + p.persistentRegisters, err = pebbleStorage.NewRegisters(p.pdb, pebbleStorage.PruningDisabled) + p.Require().NoError(err) + + p.persistentEvents = store.NewEvents(p.metrics, p.db) + p.persistentTransactions = store.NewTransactions(p.metrics, p.db) + p.persistentCollections = store.NewCollections(p.db, p.persistentTransactions) + p.persistentResults = store.NewLightTransactionResults(p.metrics, p.db, bstorage.DefaultCacheSize) + p.persistentTxResultErrMsg = store.NewTransactionResultErrorMessages(p.metrics, p.db, bstorage.DefaultCacheSize) + p.results = store.NewExecutionResults(p.metrics, p.db) + + p.consumerProgress, err = store.NewConsumerProgress(p.db, "test_consumer").Initialize(sealedBlock.Header.Height) + p.Require().NoError(err) + + // store and index the root header + p.headers = store.NewHeaders(p.metrics, p.db) + + insertLctx := p.lockManager.NewContext() + err = insertLctx.AcquireLock(storage.LockInsertBlock) + p.Require().NoError(err) + + err = p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, rootBlock.ID(), rootBlock) + }) + p.Require().NoError(err) + insertLctx.Release() + + lctx := p.lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockFinalizeBlock)) + err = p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, rootBlock.Height, rootBlock.ID()) + }) + p.Require().NoError(err) + lctx.Release() + + // store and index the latest sealed block header + insertLctx2 := p.lockManager.NewContext() + require.NoError(t, insertLctx2.AcquireLock(storage.LockInsertBlock)) + err = p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx2, rw, sealedBlock.Header.ID(), sealedBlock.Header) + }) + p.Require().NoError(err) + insertLctx2.Release() + + lctx = p.lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockFinalizeBlock)) + err = p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, sealedBlock.Header.Height, sealedBlock.ID()) + }) + p.Require().NoError(err) + lctx.Release() + + // Store and index sealed block execution result + err = p.results.Store(sealedExecutionResult) + p.Require().NoError(err) + + err = p.results.Index(sealedBlock.ID(), sealedExecutionResult.ID()) + p.Require().NoError(err) + + p.persistentLatestSealedResult, err = store.NewLatestPersistedSealedResult(p.consumerProgress, p.headers, p.results) + p.Require().NoError(err) + + p.block = unittest.BlockWithParentFixture(sealedBlock.Header) + p.executionResult = unittest.ExecutionResultFixture(unittest.WithBlock(p.block)) + + p.execDataRequester = reqestermock.NewExecutionDataRequester(t) + p.txResultErrMsgsRequester = txerrmsgsmock.NewRequester(t) + 
p.txResultErrMsgsRequestTimeout = DefaultTxResultErrMsgsRequestTimeout + + p.config = PipelineConfig{ + parentState: StateWaitingPersist, + } + p.expectedExecutionData, p.expectedTxResultErrMsgs = p.createExecutionData() +} + +// TearDownTest cleans up resources after each test case. +// It closes database connections and removes temporary directories +// to ensure a clean state for subsequent tests. +func (p *PipelineFunctionalSuite) TearDownTest() { + p.Require().NoError(p.pdb.Close()) + p.Require().NoError(p.bdb.Close()) + p.Require().NoError(os.RemoveAll(p.tmpDir)) +} + +// TestPipelineCompletesSuccessfully verifies the successful completion of the pipeline. +// It tests that: +// 1. Pipeline processes execution data through all states correctly +// 2. All data types (events, collections, transactions, registers, error messages) are correctly persisted to storage +// 3. No errors occur during the entire process +func (p *PipelineFunctionalSuite) TestPipelineCompletesSuccessfully() { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForStateUpdates(p.T(), updateChan, StateProcessing, StateWaitingPersist) + + pipeline.SetSealed() + + waitForStateUpdates(p.T(), updateChan, StateComplete) + + expectedChunkExecutionData := p.expectedExecutionData.ChunkExecutionDatas[0] + p.verifyDataPersistence(expectedChunkExecutionData, p.expectedTxResultErrMsgs) + }, p.config) +} + +// TestPipelineDownloadError tests how the pipeline handles errors during the download phase. +// It ensures that both execution data and transaction result error message request errors +// are correctly detected and returned. +func (p *PipelineFunctionalSuite) TestPipelineDownloadError() { + tests := []struct { + name string + expectedErr error + requesterInitialization func(err error) + }{ + { + name: "execution data requester malformed data error", + expectedErr: execution_data.NewMalformedDataError(fmt.Errorf("execution data test deserialization error")), + requesterInitialization: func(err error) { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return((*execution_data.BlockExecutionData)(nil), err).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + }, + }, + { + name: "transaction result error messages requester not found error", + expectedErr: fmt.Errorf("test transaction result error messages not found error"), + requesterInitialization: func(err error) { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(([]flow.TransactionResultErrorMessage)(nil), err).Once() + }, + }, + } + + for _, test := range tests { + p.T().Run(test.name, func(t *testing.T) { + test.requesterInitialization(test.expectedErr) + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForError(p.T(), errChan, test.expectedErr) + p.Assert().Equal(StateProcessing, pipeline.GetState()) + }, p.config) + }) + } +} + +// TestPipelineIndexingError tests error handling during the indexing phase. 
+// It verifies that when execution data contains invalid block IDs, the pipeline +// properly detects the inconsistency and returns an appropriate error. +func (p *PipelineFunctionalSuite) TestPipelineIndexingError() { + invalidBlockID := unittest.IdentifierFixture() + // Setup successful download + expectedExecutionData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(invalidBlockID), // Wrong block ID to cause indexing error + ) + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once() + + // note: txResultErrMsgsRequester.Request() currently never returns and error, so skipping the case + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + + expectedIndexingError := fmt.Errorf( + "could not perform indexing: invalid block execution data. expected block_id=%s, actual block_id=%s", + p.block.ID().String(), + invalidBlockID.String(), + ) + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForErrorWithCustomCheckers(p.T(), errChan, func(err error) { + p.Require().Error(err) + + p.Assert().Equal(expectedIndexingError.Error(), err.Error()) + }) + p.Assert().Equal(StateProcessing, pipeline.GetState()) + }, p.config) +} + +// TestPipelinePersistingError tests the pipeline behavior when an error occurs during the persisting step. +func (p *PipelineFunctionalSuite) TestPipelinePersistingError() { + expectedError := fmt.Errorf("test events batch store error") + // Mock events storage to simulate an error on a persisting step. In normal flow and with real storages, + // it is hard to make a meaningful error explicitly. + mockEvents := storagemock.NewEvents(p.T()) + mockEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(expectedError).Once() + p.persistentEvents = mockEvents + + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForStateUpdates(p.T(), updateChan, StateProcessing, StateWaitingPersist) + + pipeline.SetSealed() + + waitForError(p.T(), errChan, expectedError) + p.Assert().Equal(StateWaitingPersist, pipeline.GetState()) + }, p.config) +} + +// TestMainCtxCancellationDuringRequestingExecutionData tests context cancellation during the +// request of execution data. It ensures that cancellation is handled properly when triggered +// while execution data is being downloaded. +func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringRequestingExecutionData() { + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return( + func(ctx context.Context) (*execution_data.BlockExecutionData, error) { + // Wait for cancellation + cancel() + + <-ctx.Done() + + return nil, ctx.Err() + }).Once() + + // This call marked as `Maybe()` because it may not be called depending on timing. 
+		p.txResultErrMsgsRequester.On("Request", mock.Anything).Return([]flow.TransactionResultErrorMessage{}, nil).Maybe()
+
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, StateProcessing)
+		waitForError(p.T(), errChan, context.Canceled)
+
+		p.Assert().Equal(StateProcessing, pipeline.GetState())
+	}, p.config)
+}
+
+// TestMainCtxCancellationDuringRequestingTxResultErrMsgs tests context cancellation during
+// the request of transaction result error messages. It verifies that when the parent context
+// is canceled during this phase, the pipeline handles the cancellation gracefully
+// and transitions to the correct state.
+func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringRequestingTxResultErrMsgs() {
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		// This call is marked as `Maybe()` because it may not be called depending on timing.
+		p.execDataRequester.On("RequestExecutionData", mock.Anything).Return((*execution_data.BlockExecutionData)(nil), nil).Maybe()
+
+		p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(
+			func(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) {
+				// Trigger cancellation, then wait for the context to be canceled
+				cancel()
+
+				<-ctx.Done()
+
+				return nil, ctx.Err()
+			}).Maybe()
+
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, StateProcessing)
+		waitForError(p.T(), errChan, context.Canceled)
+
+		p.Assert().Equal(StateProcessing, pipeline.GetState())
+	}, p.config)
+}
+
+// TestMainCtxCancellationDuringWaitingPersist tests the pipeline's behavior when the main context is canceled during StateWaitingPersist.
+func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringWaitingPersist() {
+	p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once()
+	p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once()
+
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, StateProcessing, StateWaitingPersist)
+
+		cancel()
+
+		pipeline.SetSealed()
+
+		waitForError(p.T(), errChan, context.Canceled)
+
+		p.Assert().Equal(StateWaitingPersist, pipeline.GetState())
+	}, p.config)
+}
+
+// TestPipelineShutdownOnParentAbandon verifies that the pipeline transitions correctly to a shutdown state when the parent is abandoned.
+func (p *PipelineFunctionalSuite) TestPipelineShutdownOnParentAbandon() {
+	tests := []struct {
+		name        string
+		config      PipelineConfig
+		customSetup func(pipeline Pipeline, updateChan chan State)
+	}{
+		{
+			name: "from StatePending",
+			config: PipelineConfig{
+				beforePipelineRun: func(pipeline *PipelineImpl) {
+					pipeline.OnParentStateUpdated(StateAbandoned)
+				},
+				parentState: StateAbandoned,
+			},
+		},
+		{
+			name: "from StateProcessing",
+			customSetup: func(pipeline Pipeline, updateChan chan State) {
+				waitForStateUpdates(p.T(), updateChan, StateProcessing)
+
+				pipeline.OnParentStateUpdated(StateAbandoned)
+			},
+			config: p.config,
+		},
+		{
+			name: "from StateWaitingPersist",
+			customSetup: func(pipeline Pipeline, updateChan chan State) {
+				waitForStateUpdates(p.T(), updateChan, StateProcessing, StateWaitingPersist)
+
+				pipeline.OnParentStateUpdated(StateAbandoned)
+			},
+			config: p.config,
+		},
+	}
+
+	for _, test := range tests {
+		p.T().Run(test.name, func(t *testing.T) {
+			p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+				p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Maybe()
+				p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Maybe()
+
+				if test.customSetup != nil {
+					test.customSetup(pipeline, updateChan)
+				}
+
+				waitForStateUpdates(p.T(), updateChan, StateAbandoned)
+				waitForError(p.T(), errChan, nil)
+
+				p.Assert().Equal(StateAbandoned, pipeline.GetState())
+				p.Assert().Nil(p.core.workingData)
+			}, test.config)
+		})
+	}
+}
+
+type PipelineConfig struct {
+	beforePipelineRun func(pipeline *PipelineImpl)
+	parentState       State
+}
+
+// WithRunningPipeline is a test helper that initializes and starts a pipeline instance.
+// It manages the context and channels needed to run the pipeline and invokes the testFunc
+// with access to the pipeline, update channel, error channel, and cancel function.
+func (p *PipelineFunctionalSuite) WithRunningPipeline(
+	testFunc func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc),
+	pipelineConfig PipelineConfig,
+) {
+	p.core = NewCoreImpl(
+		p.logger,
+		p.executionResult,
+		p.block.Header,
+		p.execDataRequester,
+		p.txResultErrMsgsRequester,
+		p.txResultErrMsgsRequestTimeout,
+		p.persistentRegisters,
+		p.persistentEvents,
+		p.persistentCollections,
+		p.persistentResults,
+		p.persistentTxResultErrMsg,
+		p.persistentLatestSealedResult,
+		p.db,
+		p.lockManager,
+	)
+
+	pipelineStateConsumer := NewMockStateConsumer()
+
+	pipeline := NewPipeline(p.logger, p.executionResult, false, pipelineStateConsumer)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	errChan := make(chan error)
+	// pipelineIsReady signals that the pipeline goroutine is about to run the pipeline
+	pipelineIsReady := make(chan struct{})
+
+	go func() {
+		if pipelineConfig.beforePipelineRun != nil {
+			pipelineConfig.beforePipelineRun(pipeline)
+		}
+
+		close(pipelineIsReady)
+
+		errChan <- pipeline.Run(ctx, p.core, pipelineConfig.parentState)
+	}()
+
+	<-pipelineIsReady
+
+	testFunc(pipeline, pipelineStateConsumer.updateChan, errChan, cancel)
+}
+
+// createExecutionData creates and returns test execution data and transaction result
+// error messages for use in test cases. It generates realistic test data including
+// chunk execution data with events, trie updates, collections, and system chunks.
+func (p *PipelineFunctionalSuite) createExecutionData() (*execution_data.BlockExecutionData, []flow.TransactionResultErrorMessage) { + expectedChunkExecutionData := unittest.ChunkExecutionDataFixture( + p.T(), + 0, + unittest.WithChunkEvents(unittest.EventsFixture(5)), + unittest.WithTrieUpdate(indexer.TrieUpdateRandomLedgerPayloadsFixture(p.T())), + ) + systemChunkCollection := unittest.CollectionFixture(1) + systemChunkData := &execution_data.ChunkExecutionData{ + Collection: &systemChunkCollection, + } + + expectedExecutionData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(p.block.ID()), + unittest.WithChunkExecutionDatas(expectedChunkExecutionData, systemChunkData), + ) + expectedTxResultErrMsgs := unittest.TransactionResultErrorMessagesFixture(5) + return expectedExecutionData, expectedTxResultErrMsgs +} + +// verifyDataPersistence checks that all expected data was actually persisted to storage. +// It verifies the persistence of events, collections, transaction results, registers, +// and transaction result error messages by comparing stored data with expected values. +func (p *PipelineFunctionalSuite) verifyDataPersistence( + expectedChunkExecutionData *execution_data.ChunkExecutionData, + expectedTxResultErrMsgs []flow.TransactionResultErrorMessage, +) { + p.verifyEventsPersisted(expectedChunkExecutionData.Events) + + p.verifyCollectionPersisted(expectedChunkExecutionData.Collection) + + p.verifyTransactionResultsPersisted(expectedChunkExecutionData.TransactionResults) + + p.verifyRegistersPersisted(expectedChunkExecutionData.TrieUpdate) + + p.verifyTxResultErrorMessagesPersisted(expectedTxResultErrMsgs) +} + +// verifyEventsPersisted checks that events were stored correctly in the events storage. +// It retrieves events by block ID and compares them with the expected events list. +func (p *PipelineFunctionalSuite) verifyEventsPersisted(expectedEvents flow.EventsList) { + storedEvents, err := p.persistentEvents.ByBlockID(p.block.ID()) + p.Require().NoError(err) + + p.Assert().Equal(expectedEvents, flow.EventsList(storedEvents)) +} + +// verifyCollectionPersisted checks that the collection was stored correctly in the +// collections storage. It verifies both the light collection data and its transaction +// IDs are persisted correctly. +func (p *PipelineFunctionalSuite) verifyCollectionPersisted(expectedCollection *flow.Collection) { + collectionID := expectedCollection.ID() + expectedLightCollection := expectedCollection.Light() + + storedLightCollection, err := p.persistentCollections.LightByID(collectionID) + p.Require().NoError(err) + + p.Assert().Equal(&expectedLightCollection, storedLightCollection) + p.Assert().ElementsMatch(expectedCollection.Light().Transactions, storedLightCollection.Transactions) +} + +// verifyTransactionResultsPersisted checks that transaction results were stored correctly +// in the results storage. It retrieves results by block ID and compares them with expected results. +func (p *PipelineFunctionalSuite) verifyTransactionResultsPersisted(expectedResults []flow.LightTransactionResult) { + storedResults, err := p.persistentResults.ByBlockID(p.block.ID()) + p.Require().NoError(err) + + p.Assert().ElementsMatch(expectedResults, storedResults) +} + +// verifyRegistersPersisted checks that registers were stored correctly in the registers storage. +// It iterates through all payloads in the trie update and verifies each register value +// can be retrieved at the correct block height. 
+func (p *PipelineFunctionalSuite) verifyRegistersPersisted(expectedTrieUpdate *ledger.TrieUpdate) {
+	for _, payload := range expectedTrieUpdate.Payloads {
+		key, err := payload.Key()
+		p.Require().NoError(err)
+
+		registerID, err := convert.LedgerKeyToRegisterID(key)
+		p.Require().NoError(err)
+
+		storedValue, err := p.persistentRegisters.Get(registerID, p.block.Header.Height)
+		p.Require().NoError(err)
+
+		expectedValue := payload.Value()
+		p.Assert().Equal(expectedValue, ledger.Value(storedValue))
+	}
+}
+
+// verifyTxResultErrorMessagesPersisted checks that transaction result error messages
+// were stored correctly in the error messages storage. It retrieves messages by block ID
+// and compares them with expected error messages.
+func (p *PipelineFunctionalSuite) verifyTxResultErrorMessagesPersisted(
+	expectedTxResultErrMsgs []flow.TransactionResultErrorMessage,
+) {
+	storedErrMsgs, err := p.persistentTxResultErrMsg.ByBlockID(p.block.ID())
+	p.Require().NoError(err, "Should be able to retrieve tx result error messages by block ID")
+
+	p.Assert().ElementsMatch(expectedTxResultErrMsgs, storedErrMsgs)
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline_test.go b/module/executiondatasync/optimistic_sync/pipeline_test.go
new file mode 100644
index 00000000000..5efc85785ae
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline_test.go
@@ -0,0 +1,428 @@
+package optimistic_sync
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+
+	osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestPipelineStateTransitions verifies that the pipeline correctly transitions
+// through states when provided with the correct conditions.
+func TestPipelineStateTransitions(t *testing.T) {
+	pipeline, mockCore, updateChan, parent := createPipeline(t)
+
+	pipeline.SetSealed()
+	parent.UpdateState(StateComplete, pipeline)
+
+	mockCore.On("Download", mock.Anything).Return(nil)
+	mockCore.On("Index").Return(nil)
+	mockCore.On("Persist").Return(nil)
+
+	assert.Equal(t, StatePending, pipeline.GetState(), "Pipeline should start in Pending state")
+
+	errChan := make(chan error)
+	go func() {
+		errChan <- pipeline.Run(context.Background(), mockCore, parent.GetState())
+	}()
+
+	// Wait for the pipeline to progress through Processing and WaitingPersist and reach Complete
+	expectedStates := []State{StateProcessing, StateWaitingPersist, StateComplete}
+	waitForStateUpdates(t, updateChan, expectedStates...)
+	assert.Equal(t, StateComplete, pipeline.GetState(), "Pipeline should be in Complete state")
+
+	// Run should complete without error
+	waitForError(t, errChan, nil)
+}
+
+// TestPipelineParentDependentTransitions verifies that a pipeline's transitions
+// depend on the parent pipeline's state.
+func TestPipelineParentDependentTransitions(t *testing.T) {
+	pipeline, mockCore, updateChan, parent := createPipeline(t)
+
+	mockCore.On("Download", mock.Anything).Return(nil)
+	mockCore.On("Index").Return(nil)
+	mockCore.On("Persist").Return(nil)
+
+	assert.Equal(t, StatePending, pipeline.GetState(), "Pipeline should start in Pending state")
+
+	errChan := make(chan error)
+	go func() {
+		errChan <- pipeline.Run(context.Background(), mockCore, parent.GetState())
+	}()
+
+	// Initial update - parent still in Pending state
+	parent.UpdateState(StatePending, pipeline)
+
+	// Check that the pipeline remains in the Pending state
+	waitNeverStateUpdate(t, updateChan)
+	assert.Equal(t, StatePending, pipeline.GetState(), "Pipeline should remain in Pending state")
+	mockCore.AssertNotCalled(t, "Download")
+
+	// Update parent to Processing
+	parent.UpdateState(StateProcessing, pipeline)
+
+	// Pipeline should now progress to WaitingPersist state and stop
+	expectedStates := []State{StateProcessing, StateWaitingPersist}
+	waitForStateUpdates(t, updateChan, expectedStates...)
+	assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should progress to WaitingPersist state")
+	mockCore.AssertCalled(t, "Download", mock.Anything)
+	mockCore.AssertCalled(t, "Index")
+	mockCore.AssertNotCalled(t, "Persist")
+
+	waitNeverStateUpdate(t, updateChan)
+	assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should remain in WaitingPersist state")
+
+	// Update parent to complete - should allow persisting when sealed
+	parent.UpdateState(StateComplete, pipeline)
+
+	// this alone should not allow the pipeline to progress to any other state
+	waitNeverStateUpdate(t, updateChan)
+	assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should remain in WaitingPersist state")
+
+	// Mark the execution result as sealed, this should allow the pipeline to progress to Complete state
+	pipeline.SetSealed()
+
+	// Wait for pipeline to complete
+	expectedStates = []State{StateComplete}
+	waitForStateUpdates(t, updateChan, expectedStates...)
+	assert.Equal(t, StateComplete, pipeline.GetState(), "Pipeline should reach Complete state")
+	mockCore.AssertCalled(t, "Persist")
+
+	// Run should complete without error
+	waitForError(t, errChan, nil)
+}
+
+// TestAbandoned verifies that a pipeline is properly abandoned when either it
+// or its parent pipeline is abandoned.
+func TestAbandoned(t *testing.T) { + t.Run("starts already abandoned", func(t *testing.T) { + pipeline, mockCore, updateChan, parent := createPipeline(t) + + mockCore.On("Abandon").Return(nil) + + pipeline.Abandon() + + errChan := make(chan error) + go func() { + errChan <- pipeline.Run(context.Background(), mockCore, parent.GetState()) + }() + + // first state must be abandoned + waitForStateUpdates(t, updateChan, StateAbandoned) + + // Run should complete without error + waitForError(t, errChan, nil) + }) + + // Test cases abandoning during different stages of processing + testCases := []struct { + name string + setupMock func(*PipelineImpl, *mockStateProvider, *osmock.Core) + expectedStates []State + }{ + { + name: "Abandon during download", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Run(func(args mock.Arguments) { + pipeline.Abandon() + + ctx := args[0].(context.Context) + unittest.RequireCloseBefore(t, ctx.Done(), 500*time.Millisecond, "Abandon should cause context to be canceled") + }).Return(func(ctx context.Context) error { + return ctx.Err() + }) + }, + expectedStates: []State{StateProcessing, StateAbandoned}, + }, + { + name: "Parent abandoned during download", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Run(func(args mock.Arguments) { + parent.UpdateState(StateAbandoned, pipeline) + + ctx := args[0].(context.Context) + unittest.RequireCloseBefore(t, ctx.Done(), 500*time.Millisecond, "Abandon should cause context to be canceled") + }).Return(func(ctx context.Context) error { + return ctx.Err() + }) + }, + expectedStates: []State{StateProcessing, StateAbandoned}, + }, + { + name: "Abandon during index", + // Note: indexing will complete, and the pipeline will transition to waiting persist + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + pipeline.Abandon() + }).Return(nil) + }, + expectedStates: []State{StateProcessing, StateAbandoned}, + }, + { + name: "Parent abandoned during index", + // Note: indexing will complete, and the pipeline will transition to waiting persist + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + parent.UpdateState(StateAbandoned, pipeline) + }).Return(nil) + }, + expectedStates: []State{StateProcessing, StateAbandoned}, + }, + { + name: "Abandon during waiting to persist", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + go func() { + time.Sleep(100 * time.Millisecond) + pipeline.Abandon() + }() + }).Return(nil) + }, + expectedStates: []State{StateProcessing, StateWaitingPersist, StateAbandoned}, + }, + { + name: "Parent abandoned during waiting to persist", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + go func() { + time.Sleep(100 * time.Millisecond) + parent.UpdateState(StateAbandoned, pipeline) + }() + }).Return(nil) + }, + expectedStates: []State{StateProcessing, 
StateWaitingPersist, StateAbandoned}, + }, + // Note: it does not make sense to abandon during persist, since it will only be run when: + // 1. the parent is already complete + // 2. the pipeline's result is sealed + // At that point, there are no conditions that would cause the pipeline to transition to any other state + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pipeline, mockCore, updateChan, parent := createPipeline(t) + tc.setupMock(pipeline, parent, mockCore) + + mockCore.On("Abandon").Return(nil) + + errChan := make(chan error) + go func() { + errChan <- pipeline.Run(context.Background(), mockCore, parent.GetState()) + }() + + // Send parent update to start processing + parent.UpdateState(StateProcessing, pipeline) + + waitForStateUpdates(t, updateChan, tc.expectedStates...) + + waitForError(t, errChan, nil) + }) + } +} + +// TestPipelineContextCancellation tests the Run method's context cancelation behavior during different stages of processing +func TestPipelineContextCancellation(t *testing.T) { + // Test cases for different stages of processing + testCases := []struct { + name string + setupMock func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context + }{ + { + name: "Cancel before download starts", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // no Core methods called + return ctx + }, + }, + { + name: "Cancel during download", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + mockCore.On("Download", mock.Anything).Run(func(args mock.Arguments) { + cancel() + pipelineCtx := args[0].(context.Context) + unittest.RequireCloseBefore(t, pipelineCtx.Done(), 500*time.Millisecond, "Abandon should cause context to be canceled") + }).Return(func(pipelineCtx context.Context) error { + return pipelineCtx.Err() + }) + return ctx + }, + }, + { + name: "Cancel between steps", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + cancel() + }).Return(nil) + + return ctx + }, + }, + { + name: "Cancel during abandon", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + pipeline.Abandon() + }).Return(nil) + mockCore.On("Abandon").Run(func(args mock.Arguments) { + cancel() + }).Return(func() error { + return ctx.Err() + }) + + return ctx + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pipeline, mockCore, _, parent := createPipeline(t) + + parent.UpdateState(StateComplete, pipeline) + pipeline.SetSealed() + + ctx := tc.setupMock(pipeline, parent, mockCore) + + errChan := make(chan error) + go func() { + errChan <- pipeline.Run(ctx, mockCore, parent.GetState()) + }() + + waitForError(t, errChan, context.Canceled) + }) + } +} + +// TestPipelineErrorHandling verifies that errors from Core methods are properly +// propagated back to the caller. 
+func TestPipelineErrorHandling(t *testing.T) { + // Test cases for different stages of processing + testCases := []struct { + name string + setupMock func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core, expectedErr error) + expectedErr error + expectedStates []State + }{ + { + name: "Download Error", + setupMock: func(pipeline *PipelineImpl, _ *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(expectedErr) + }, + expectedErr: errors.New("download error"), + expectedStates: []State{StateProcessing}, + }, + { + name: "Index Error", + setupMock: func(pipeline *PipelineImpl, _ *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Return(expectedErr) + }, + expectedErr: errors.New("index error"), + expectedStates: []State{StateProcessing}, + }, + { + name: "Persist Error", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + parent.UpdateState(StateComplete, pipeline) + pipeline.SetSealed() + }).Return(nil) + mockCore.On("Persist").Return(expectedErr) + }, + expectedErr: errors.New("persist error"), + expectedStates: []State{StateProcessing, StateWaitingPersist}, + }, + { + name: "Abandon Error", + setupMock: func(pipeline *PipelineImpl, _ *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + pipeline.Abandon() + mockCore.On("Abandon").Return(expectedErr) + }, + expectedErr: errors.New("abandon error"), + expectedStates: []State{StateAbandoned}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pipeline, mockCore, updateChan, parent := createPipeline(t) + + tc.setupMock(pipeline, parent, mockCore, tc.expectedErr) + + errChan := make(chan error) + go func() { + errChan <- pipeline.Run(context.Background(), mockCore, parent.GetState()) + }() + + // Send parent update to trigger processing + parent.UpdateState(StateProcessing, pipeline) + + waitForStateUpdates(t, updateChan, tc.expectedStates...) + + waitForError(t, errChan, tc.expectedErr) + }) + } +} + +// TestSetSealed verifies that the pipeline correctly sets the sealed flag. +func TestSetSealed(t *testing.T) { + pipeline, _, _, _ := createPipeline(t) + + pipeline.SetSealed() + assert.True(t, pipeline.isSealed.Load()) +} + +// TestValidateTransition verifies that the pipeline correctly validates state transitions. 
+func TestValidateTransition(t *testing.T) { + + allStates := []State{StatePending, StateProcessing, StateWaitingPersist, StateComplete, StateAbandoned} + + // these are all of the valid transitions from a state to another state + validTransitions := map[State]map[State]bool{ + StatePending: {StateProcessing: true, StateAbandoned: true}, + StateProcessing: {StateWaitingPersist: true, StateAbandoned: true}, + StateWaitingPersist: {StateComplete: true, StateAbandoned: true}, + StateComplete: {}, + StateAbandoned: {}, + } + + // iterate through all possible transitions, and validate that the valid transitions succeed, and the invalid transitions fail + pipeline, _, _, _ := createPipeline(t) + for _, currentState := range allStates { + for _, newState := range allStates[1:] { + if currentState == newState { + continue // skip since higher level code will handle this + } + + err := pipeline.validateTransition(currentState, newState) + + if validTransitions[currentState][newState] { + assert.NoError(t, err) + continue + } + + assert.ErrorIs(t, err, ErrInvalidTransition) + } + } +} diff --git a/module/executiondatasync/optimistic_sync/pipeline_test_utils.go b/module/executiondatasync/optimistic_sync/pipeline_test_utils.go new file mode 100644 index 00000000000..c221473b32b --- /dev/null +++ b/module/executiondatasync/optimistic_sync/pipeline_test_utils.go @@ -0,0 +1,130 @@ +package optimistic_sync + +import ( + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + + osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// mockStateProvider is a mock implementation of a parent state provider. +// It tracks the current state and notifies the pipeline when the state changes. +type mockStateProvider struct { + state State +} + +var _ PipelineStateProvider = (*mockStateProvider)(nil) + +// NewMockStateProvider initializes a mockStateProvider with the default state StatePending. +func NewMockStateProvider() *mockStateProvider { + return &mockStateProvider{ + state: StatePending, + } +} + +// UpdateState sets the internal state and triggers a pipeline update. +func (m *mockStateProvider) UpdateState(state State, pipeline *PipelineImpl) { + m.state = state + pipeline.OnParentStateUpdated(state) +} + +// GetState returns the current internal state. + +func (m *mockStateProvider) GetState() State { + return m.state +} + +// mockStateConsumer is a mock implementation used in tests to receive state updates from the pipeline. +// It exposes a buffered channel to capture the state transitions. +type mockStateConsumer struct { + updateChan chan State +} + +var _ PipelineStateConsumer = (*mockStateConsumer)(nil) + +// NewMockStateConsumer creates a new instance of mockStateConsumer with a buffered channel. +func NewMockStateConsumer() *mockStateConsumer { + return &mockStateConsumer{ + updateChan: make(chan State, 10), + } +} + +func (m *mockStateConsumer) OnStateUpdated(state State) { + m.updateChan <- state +} + +// waitForStateUpdates waits for a sequence of state updates to occur or timeout after 500ms. +// updates must be received in the correct order or the test will fail. 
+func waitForStateUpdates(t *testing.T, updateChan <-chan State, expectedStates ...State) {
+	done := make(chan struct{})
+	unittest.RequireReturnsBefore(t, func() {
+		for _, expected := range expectedStates {
+			select {
+			case <-done:
+				return
+			case update := <-updateChan:
+				assert.Equalf(t, expected, update, "expected pipeline to transition to %s, but got %s", expected, update)
+			}
+		}
+	}, 500*time.Millisecond, "Timeout waiting for state update")
+	close(done) // make sure the function exits after a timeout
+}
+
+// waitNeverStateUpdate verifies that no state updates occur within 500ms.
+// The test fails if any unexpected state transition is observed.
+func waitNeverStateUpdate(t *testing.T, updateChan <-chan State) {
+	done := make(chan struct{})
+	unittest.RequireNeverReturnBefore(t, func() {
+		select {
+		case <-done:
+			return
+		case newState := <-updateChan:
+			t.Fatalf("Pipeline transitioned to state %s, but should not have", newState)
+		}
+	}, 500*time.Millisecond, "expected pipeline to not transition to any state")
+	close(done) // make sure the function exits after a timeout
+}
+
+// waitForErrorWithCustomCheckers waits for an error from the errChan within 500ms
+// and applies custom checker functions to validate the error.
+// If no checkers are provided, it asserts that no error occurred.
+func waitForErrorWithCustomCheckers(t *testing.T, errChan <-chan error, errorCheckers ...func(err error)) {
+	unittest.RequireReturnsBefore(t, func() {
+		err := <-errChan
+		if len(errorCheckers) == 0 {
+			assert.NoError(t, err, "Pipeline should complete without errors")
+		} else {
+			for _, checker := range errorCheckers {
+				checker(err)
+			}
+		}
+	}, 500*time.Millisecond, "Timeout waiting for error")
+}
+
+// waitForError waits for an error from the errChan within 500ms and asserts it matches the expected error.
+func waitForError(t *testing.T, errChan <-chan error, expectedErr error) {
+	unittest.RequireReturnsBefore(t, func() {
+		err := <-errChan
+		if expectedErr == nil {
+			assert.NoError(t, err, "Pipeline should complete without errors")
+		} else {
+			assert.ErrorIs(t, err, expectedErr)
+		}
+	}, 500*time.Millisecond, "Timeout waiting for error")
+}
+
+// createPipeline initializes and returns a pipeline instance with its mock dependencies.
+// It returns the pipeline, the mocked core, a state update channel, and the parent state provider.
+func createPipeline(t *testing.T) (*PipelineImpl, *osmock.Core, <-chan State, *mockStateProvider) {
+	mockCore := osmock.NewCore(t)
+	parent := NewMockStateProvider()
+	stateReceiver := NewMockStateConsumer()
+
+	pipeline := NewPipeline(zerolog.Nop(), unittest.ExecutionResultFixture(), false, stateReceiver)
+
+	return pipeline, mockCore, stateReceiver.updateChan, parent
+}
diff --git a/module/executiondatasync/optimistic_sync/snapshot.go b/module/executiondatasync/optimistic_sync/snapshot.go
new file mode 100644
index 00000000000..3b3dd371f29
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/snapshot.go
@@ -0,0 +1,26 @@
+package optimistic_sync
+
+import (
+	"github.com/onflow/flow-go/storage"
+)
+
+// Snapshot provides access to execution data readers for querying various data types from a specific ExecutionResult.
+type Snapshot interface {
+	// Events returns a reader for querying event data.
+	Events() storage.EventsReader
+
+	// Collections returns a reader for querying collection data.
+	Collections() storage.CollectionsReader
+
+	// Transactions returns a reader for querying transaction data.
+ Transactions() storage.TransactionsReader + + // LightTransactionResults returns a reader for querying light transaction result data. + LightTransactionResults() storage.LightTransactionResultsReader + + // TransactionResultErrorMessages returns a reader for querying transaction error message data. + TransactionResultErrorMessages() storage.TransactionResultErrorMessagesReader + + // Registers returns a reader for querying register data. + Registers() storage.RegisterIndexReader +} diff --git a/module/executiondatasync/optimistic_sync/state.go b/module/executiondatasync/optimistic_sync/state.go new file mode 100644 index 00000000000..435c08bad8c --- /dev/null +++ b/module/executiondatasync/optimistic_sync/state.go @@ -0,0 +1,40 @@ +package optimistic_sync + +// State represents the state of the processing pipeline +type State int32 + +const ( + // StatePending is the initial state after instantiation, before Run is called + StatePending State = iota + // StateProcessing represents the state where data processing (download and indexing) has been started + StateProcessing + // StateWaitingPersist represents the state where all data is indexed, but conditions to persist are not met + StateWaitingPersist + // StateComplete represents the state where all data is persisted to storage + StateComplete + // StateAbandoned represents the state where processing was aborted + StateAbandoned +) + +// String representation of states for logging +func (s State) String() string { + switch s { + case StatePending: + return "pending" + case StateWaitingPersist: + return "waiting_persist" + case StateProcessing: + return "processing" + case StateComplete: + return "complete" + case StateAbandoned: + return "abandoned" + default: + return "" + } +} + +// IsTerminal returns true if the state is a terminal state (Complete or Abandoned). +func (s State) IsTerminal() bool { + return s == StateComplete || s == StateAbandoned +} diff --git a/module/executiondatasync/optimistic_syncing/core.go b/module/executiondatasync/optimistic_syncing/core.go deleted file mode 100644 index 3bd50c61839..00000000000 --- a/module/executiondatasync/optimistic_syncing/core.go +++ /dev/null @@ -1,19 +0,0 @@ -package pipeline - -import ( - "context" -) - -// Core defines the interface for pipeline processing steps. -// Each implementation should handle an execution data and implement the three-phase processing: -// download, index, and persist. -type Core interface { - // Download retrieves all necessary data for processing. - Download(ctx context.Context) error - - // Index processes the downloaded data and creates in-memory indexes. - Index(ctx context.Context) error - - // Persist stores the indexed data in permanent storage. 
- Persist(ctx context.Context) error -} diff --git a/module/executiondatasync/optimistic_syncing/pipeline.go b/module/executiondatasync/optimistic_syncing/pipeline.go deleted file mode 100644 index 8612dfe3a3a..00000000000 --- a/module/executiondatasync/optimistic_syncing/pipeline.go +++ /dev/null @@ -1,446 +0,0 @@ -package pipeline - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" -) - -// State represents the state of the processing pipeline -type State int - -const ( - // StateReady is the initial state after instantiation and before downloading has begun - StateReady State = iota - // StateDownloading represents the state where data download is in progress - StateDownloading - // StateIndexing represents the state where data is being indexed - StateIndexing - // StateWaitingPersist represents the state where all data is indexed, but conditions to persist are not met - StateWaitingPersist - // StatePersisting represents the state where the indexed data is being persisted to storage - StatePersisting - // StateComplete represents the state where all data is persisted to storage - StateComplete - // StateCanceled represents the state where processing was aborted - StateCanceled -) - -// String representation of states for logging -func (s State) String() string { - switch s { - case StateReady: - return "ready" - case StateDownloading: - return "downloading" - case StateIndexing: - return "indexing" - case StateWaitingPersist: - return "waiting_persist" - case StatePersisting: - return "persisting" - case StateComplete: - return "complete" - case StateCanceled: - return "canceled" - default: - return "" - } -} - -// StateUpdate contains state update information -type StateUpdate struct { - // DescendsFromLastPersistedSealed indicates if this pipeline descends from - // the last persisted sealed result - DescendsFromLastPersistedSealed bool - // ParentState contains the state information from the parent pipeline - ParentState State -} - -// StateUpdatePublisher is a function that publishes state updates -type StateUpdatePublisher func(update StateUpdate) - -// Pipeline represents a generic processing pipeline with state transitions. -// It processes data through sequential states: Ready -> Downloading -> Indexing -> -// WaitingPersist -> Persisting -> Complete, with conditions for each transition. -type Pipeline struct { - logger zerolog.Logger - statePublisher StateUpdatePublisher - - mu sync.RWMutex - state State - isSealed bool - descendsFromSealed bool - parentState State - executionResult *flow.ExecutionResult - - core Core - stateNotifier engine.Notifier - cancel context.CancelCauseFunc -} - -// NewPipeline creates a new processing pipeline. -// Pipelines must only be created for ExecutionResults that descend from the latest persisted sealed result. -// The pipeline is initialized in the Ready state. 
-// -// Parameters: -// - logger: the logger to use for the pipeline -// - isSealed: indicates if the pipeline's ExecutionResult is sealed -// - executionResult: processed execution result -// - core: implements the processing logic for the pipeline -// - stateUpdatePublisher: called when the pipeline needs to broadcast state updates -// -// Returns: -// - new pipeline object -func NewPipeline( - logger zerolog.Logger, - isSealed bool, - executionResult *flow.ExecutionResult, - core Core, - stateUpdatePublisher StateUpdatePublisher, -) *Pipeline { - p := &Pipeline{ - logger: logger.With().Str("component", "pipeline").Str("execution_result_id", executionResult.ExecutionDataID.String()).Str("block_id", executionResult.BlockID.String()).Logger(), - statePublisher: stateUpdatePublisher, - state: StateReady, - isSealed: isSealed, - descendsFromSealed: true, - stateNotifier: engine.NewNotifier(), - core: core, - executionResult: executionResult, - } - - return p -} - -// Run starts the pipeline processing and blocks until completion or context cancellation. -// -// This function handles the progression through the pipeline states, executing the appropriate -// processing functions at each step. -// -// When the pipeline reaches a terminal state (StateComplete or StateCanceled), the function returns. -// The function will also return if the provided context is canceled. -// -// Returns an error if any processing step fails with an irrecoverable error. -// Returns nil if processing completes successfully, reaches a terminal state, -// or if either the parent or pipeline context is canceled. -func (p *Pipeline) Run(parentCtx context.Context) error { - ctx, cancel := context.WithCancelCause(parentCtx) - defer cancel(nil) - - p.mu.Lock() - p.cancel = cancel - p.mu.Unlock() - - notifierChan := p.stateNotifier.Channel() - - // Trigger initial check - p.stateNotifier.Notify() - - for { - select { - case <-parentCtx.Done(): - return nil - case <-ctx.Done(): - cause := context.Cause(ctx) - if cause != nil && !errors.Is(cause, context.Canceled) { - return cause - } - return nil - case <-notifierChan: - processed, err := p.processCurrentState(ctx) - if err != nil { - if errors.Is(err, context.Canceled) { - if ctxErr := context.Cause(ctx); ctxErr == nil || errors.Is(ctxErr, context.Canceled) { - return nil - } - - if parentCtxErr := context.Cause(parentCtx); parentCtxErr == nil || errors.Is(parentCtxErr, context.Canceled) { - return nil - } - - return err - } - - return err - } - if !processed { - // Terminal state reached - return nil - } - } - } -} - -// GetState returns the current state of the pipeline. -func (p *Pipeline) GetState() State { - p.mu.RLock() - defer p.mu.RUnlock() - return p.state -} - -// SetSealed marks the data as sealed, which enables transitioning from StateWaitingPersist to StatePersisting. -func (p *Pipeline) SetSealed() { - p.mu.Lock() - p.isSealed = true - p.mu.Unlock() - - // Trigger state check - p.stateNotifier.Notify() -} - -// UpdateState updates the pipeline's state based on the provided state update. 
-func (p *Pipeline) UpdateState(update StateUpdate) { - if shouldAbort := p.handleStateUpdate(update); !shouldAbort { - // Trigger state check - p.stateNotifier.Notify() - return - } - - // If we no longer descend from the latest, cancel the pipeline - p.transitionTo(StateCanceled) - - p.mu.RLock() - defer p.mu.RUnlock() - if p.cancel != nil { - p.cancel(fmt.Errorf("abandoning due to parent updates")) - } -} - -// handleStateUpdate updates the internal state and returns whether the pipeline -// should be abandoned. -func (p *Pipeline) handleStateUpdate(update StateUpdate) bool { - p.mu.Lock() - defer p.mu.Unlock() - previousDescendsFromLatest := p.descendsFromSealed - p.descendsFromSealed = update.DescendsFromLastPersistedSealed - p.parentState = update.ParentState - - return previousDescendsFromLatest && !update.DescendsFromLastPersistedSealed -} - -// broadcastStateUpdate sends a state update via the state publisher. -func (p *Pipeline) broadcastStateUpdate() { - if p.statePublisher == nil { - return - } - - p.mu.RLock() - update := StateUpdate{ - DescendsFromLastPersistedSealed: p.descendsFromSealed, - ParentState: p.state, - } - p.mu.RUnlock() - - p.statePublisher(update) -} - -// processCurrentState handles the current state and transitions to the next state if possible. -// It returns false when a terminal state is reached (StateComplete or StateCanceled), true otherwise. -// Returns an error if any processing step fails. -func (p *Pipeline) processCurrentState(ctx context.Context) (bool, error) { - currentState := p.GetState() - - switch currentState { - case StateReady: - return p.processReady(), nil - case StateDownloading: - return p.processDownloading(ctx) - case StateIndexing: - return p.processIndexing(ctx) - case StateWaitingPersist: - return p.processWaitingPersist(), nil - case StatePersisting: - return p.processPersisting(ctx) - case StateComplete, StateCanceled: - // Terminal states - return false, nil - default: - return false, fmt.Errorf("invalid pipeline state: %s", currentState.String()) - } -} - -// transitionTo transitions the pipeline to the given state and broadcasts -// the state change to children pipelines. -func (p *Pipeline) transitionTo(newState State) { - p.mu.Lock() - oldState := p.state - p.state = newState - p.mu.Unlock() - - p.logger.Debug(). - Str("old_state", oldState.String()). - Str("new_state", newState.String()). - Msg("pipeline state transition") - - // Broadcast state update to children - p.broadcastStateUpdate() - - // Trigger state check in case we can immediately transition again - if newState != StateComplete && newState != StateCanceled { - p.stateNotifier.Notify() - } -} - -// processReady handles the Ready state and transitions to StateDownloading if possible. -// Returns true to continue processing, false if a terminal state was reached. -func (p *Pipeline) processReady() bool { - if p.canStartDownloading() { - p.transitionTo(StateDownloading) - return true - } - return true -} - -// processDownloading handles the Downloading state. -// It executes the download function and transitions to StateIndexing if successful. -// Returns true to continue processing, false if a terminal state was reached. -// Returns an error if the download step fails. -func (p *Pipeline) processDownloading(ctx context.Context) (bool, error) { - p.logger.Debug().Msg("starting download step") - - if err := p.core.Download(ctx); err != nil { - p.logger.Error(). - Err(err). 
- Msg("download step failed") - return false, err - } - - p.logger.Debug().Msg("download step completed") - - if !p.canStartIndexing() { - // If we can't transition to indexing after successful download, cancel - p.transitionTo(StateCanceled) - return false, nil - } - p.transitionTo(StateIndexing) - return true, nil -} - -// processIndexing handles the Indexing state. -// It executes the index function and transitions to StateWaitingPersist if possible. -// Returns true to continue processing, false if a terminal state was reached. -// Returns an error if the index step fails. -func (p *Pipeline) processIndexing(ctx context.Context) (bool, error) { - p.logger.Debug().Msg("starting index step") - - if err := p.core.Index(ctx); err != nil { - p.logger.Error(). - Err(err). - Msg("index step failed") - return false, err - } - - p.logger.Debug().Msg("index step completed") - - if !p.canWaitForPersist() { - // If we can't transition to waiting for persist after successful indexing, cancel - p.transitionTo(StateCanceled) - return false, nil - } - - p.transitionTo(StateWaitingPersist) - return true, nil -} - -// processWaitingPersist handles the WaitingPersist state. -// It checks if the conditions for persisting are met and transitions to StatePersisting if possible. -// Returns true to continue processing, false if a terminal state was reached. -func (p *Pipeline) processWaitingPersist() bool { - if p.canStartPersisting() { - p.transitionTo(StatePersisting) - return true - } - return true -} - -// processPersisting handles the Persisting state. -// It executes the persist function and transitions to StateComplete if successful. -// Returns true to continue processing, false if a terminal state was reached. -// Returns an error if the persist step fails. -func (p *Pipeline) processPersisting(ctx context.Context) (bool, error) { - p.logger.Debug().Msg("starting persist step") - - if err := p.core.Persist(ctx); err != nil { - p.logger.Error(). - Err(err). - Msg("persist step failed") - return false, err - } - - p.logger.Debug().Msg("persist step completed") - p.transitionTo(StateComplete) - return false, nil -} - -// canStartDownloading checks if the pipeline can transition from Ready to Downloading. -// -// Conditions for transition: -// 1. The current state must be Ready -// 2. The pipeline must descend from the last persisted sealed result -// 3. The parent pipeline must be in an active state (StateDownloading, StateIndexing, -// StateWaitingPersist, StatePersisting, or StateComplete) -func (p *Pipeline) canStartDownloading() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - if p.state != StateReady { - return false - } - - if !p.descendsFromSealed { - return false - } - - switch p.parentState { - case StateDownloading, StateIndexing, StateWaitingPersist, StatePersisting, StateComplete: - return true - default: - return false - } -} - -// canStartIndexing checks if the pipeline can transition from Downloading to Indexing. -// -// Conditions for transition: -// 1. The current state must be Downloading -// 2. The pipeline must descend from the last persisted sealed result -// 3. The parent pipeline must not be canceled -func (p *Pipeline) canStartIndexing() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.state == StateDownloading && p.descendsFromSealed && p.parentState != StateCanceled -} - -// canWaitForPersist checks if the pipeline can transition from Indexing to WaitingPersist. -// -// Conditions for transition: -// 1. The current state must be Indexing -// 2. 
The pipeline must descend from the last persisted sealed result -// 3. The parent pipeline must not be canceled -func (p *Pipeline) canWaitForPersist() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.state == StateIndexing && p.descendsFromSealed && p.parentState != StateCanceled -} - -// canStartPersisting checks if the pipeline can transition from WaitingPersist to Persisting. -// -// Conditions for transition: -// 1. The current state must be WaitingPersist -// 2. The data must be sealed -// 3. The parent pipeline must be complete -func (p *Pipeline) canStartPersisting() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.state == StateWaitingPersist && p.isSealed && p.parentState == StateComplete -} diff --git a/module/executiondatasync/optimistic_syncing/pipeline_test.go b/module/executiondatasync/optimistic_syncing/pipeline_test.go deleted file mode 100644 index bcc3511afd3..00000000000 --- a/module/executiondatasync/optimistic_syncing/pipeline_test.go +++ /dev/null @@ -1,418 +0,0 @@ -package pipeline - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_syncing/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestPipelineStateTransitions verifies that the pipeline correctly transitions -// through states when provided with the correct conditions. -func TestPipelineStateTransitions(t *testing.T) { - // Create channels for state updates - updateChan := make(chan StateUpdate, 10) - - // Create publisher function - publisher := func(update StateUpdate) { - updateChan <- update - } - - // Create mock core - mockCore := osmock.NewCore(t) - mockCore.On("Download", mock.Anything).Return(nil) - mockCore.On("Index", mock.Anything).Return(nil) - mockCore.On("Persist", mock.Anything).Return(nil) - - // Create a pipeline - pipeline := NewPipeline(zerolog.Nop(), false, unittest.ExecutionResultFixture(), mockCore, publisher) - - // Start the pipeline in a goroutine - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error) - go func() { - errChan <- pipeline.Run(ctx) - }() - - // Assert initial state - assert.Equal(t, StateReady, pipeline.GetState()) - - // Send parent update to trigger state transition - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateComplete, // Assume that parent is already complete - }) - - // Wait for pipeline to reach WaitingPersist state - waitForStateUpdate(t, updateChan, StateWaitingPersist) - assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should be in WaitingPersist state") - mockCore.AssertCalled(t, "Download", mock.Anything) - mockCore.AssertCalled(t, "Index", mock.Anything) - mockCore.AssertNotCalled(t, "Persist") - - // Mark the execution result as sealed to trigger persisting - pipeline.SetSealed() - - waitForStateUpdate(t, updateChan, StateComplete) - - mockCore.AssertCalled(t, "Persist", mock.Anything) - - // Cancel the context after the pipeline is already complete - // At this point, the pipeline has already finished successfully and returned nil - cancel() - - // Check that the pipeline has completed without errors - select { - case err := <-errChan: - assert.NoError(t, err, "Pipeline should complete without errors") - case <-time.After(500 * time.Millisecond): - t.Fatal("Timeout waiting for pipeline to return") - } -} - -// 
TestPipelineCancellation verifies that a pipeline is properly canceled when -// it no longer descends from the last persisted sealed result. -func TestPipelineCancellation(t *testing.T) { - // Create channels for state updates - updateChan := make(chan StateUpdate, 10) - - // Create publisher function - publisher := func(update StateUpdate) { - updateChan <- update - } - - // Set up a download function that signals when it starts and sleeps - downloadStarted := make(chan struct{}) - - // Create a mock core with a slow download - mockCore := osmock.NewCore(t) - mockCore.On("Download", mock.Anything).Run(func(args mock.Arguments) { - close(downloadStarted) // Signal that download has started - - // Simulate long-running operation - time.Sleep(100 * time.Millisecond) - }).Return(nil) - - // Create a pipeline - pipeline := NewPipeline(zerolog.Nop(), false, unittest.ExecutionResultFixture(), mockCore, publisher) - - // Start the pipeline - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error) - go func() { - errChan <- pipeline.Run(ctx) - }() - - // Send an update that allows starting - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateComplete, - }) - - // Wait for download to start - select { - case <-downloadStarted: - // Download started - case <-time.After(500 * time.Millisecond): - t.Fatal("Timeout waiting for download to start") - } - - // Now send an update that causes cancellation - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: false, // No longer descends from latest - ParentState: StateComplete, - }) - - // Check the error channel - select { - case err := <-errChan: - // Check if we got an error as expected - if err != nil { - assert.Contains(t, err.Error(), "abandoning due to parent updates", - "Error should indicate abandonment") - } else { - // If no error, just log it - the pipeline was canceled but returned nil - t.Log("Pipeline was canceled but returned nil error") - } - case <-time.After(500 * time.Millisecond): - t.Fatal("Timeout waiting for pipeline to complete") - } - - // Check for state updates - found := false - timeout := time.After(100 * time.Millisecond) - - for !found { - select { - case update := <-updateChan: - if !update.DescendsFromLastPersistedSealed { - found = true - } - case <-timeout: - // It's ok if we don't find it - the pipeline might have been canceled before broadcasting - t.Log("No update with descendsFromSealed=false found within timeout") - break - } - } -} - -// TestPipelineParentDependentTransitions verifies that a pipeline's transitions -// depend on the parent pipeline's state. 
-func TestPipelineParentDependentTransitions(t *testing.T) { - // Create channels for state updates - updateChan := make(chan StateUpdate, 10) - - // Create publisher function - publisher := func(update StateUpdate) { - updateChan <- update - } - - // Create a mock core - mockCore := osmock.NewCore(t) - mockCore.On("Download", mock.Anything).Return(nil) - mockCore.On("Index", mock.Anything).Return(nil) - mockCore.On("Persist", mock.Anything).Return(nil) - - // Create a pipeline - pipeline := NewPipeline(zerolog.Nop(), false, unittest.ExecutionResultFixture(), mockCore, publisher) - - // Start the pipeline - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error) - go func() { - errChan <- pipeline.Run(ctx) - }() - - // Initial update - parent in Ready state - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateReady, - }) - - // Sleep a bit to allow processing - time.Sleep(50 * time.Millisecond) - - // Check that pipeline is still in Ready state - assert.Equal(t, StateReady, pipeline.GetState(), "Pipeline should remain in Ready state") - mockCore.AssertNotCalled(t, "Download") - - // Update parent to downloading - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateDownloading, - }) - - // Wait for pipeline to progress to WaitingPersist - waitForStateUpdate(t, updateChan, StateWaitingPersist) - assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should progress to WaitingPersist state") - mockCore.AssertCalled(t, "Download", mock.Anything) - mockCore.AssertCalled(t, "Index", mock.Anything) - mockCore.AssertNotCalled(t, "Persist") - - // Update parent to complete - should allow persisting when sealed - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateComplete, - }) - - // Mark the execution result as sealed to trigger persisting - pipeline.SetSealed() - - // Wait for pipeline to complete - waitForStateUpdate(t, updateChan, StateComplete) - assert.Equal(t, StateComplete, pipeline.GetState(), "Pipeline should reach Complete state") - mockCore.AssertCalled(t, "Persist", mock.Anything) - - // Cancel the context to end the goroutine - cancel() -} - -// TestPipelineErrorHandling verifies that errors from Core methods are properly -// propagated back to the caller. 
-func TestPipelineErrorHandling(t *testing.T) { - // Test cases for different stages of processing - testCases := []struct { - name string - setupMock func(mock *osmock.Core, expectedErr error) - expectedErr error - }{ - { - name: "Download Error", - setupMock: func(m *osmock.Core, expectedErr error) { - m.On("Download", mock.Anything).Return(expectedErr) - }, - expectedErr: errors.New("download error"), - }, - { - name: "Index Error", - setupMock: func(m *osmock.Core, expectedErr error) { - m.On("Download", mock.Anything).Return(nil) - m.On("Index", mock.Anything).Return(expectedErr) - }, - expectedErr: errors.New("index error"), - }, - { - name: "Persist Error", - setupMock: func(m *osmock.Core, expectedErr error) { - m.On("Download", mock.Anything).Return(nil) - m.On("Index", mock.Anything).Return(nil) - m.On("Persist", mock.Anything).Return(expectedErr) - }, - expectedErr: errors.New("persist error"), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create channels for state updates - updateChan := make(chan StateUpdate, 10) - - // Create publisher function - publisher := func(update StateUpdate) { - updateChan <- update - } - - // Create a mock core with the specified setup - mockCore := osmock.NewCore(t) - tc.setupMock(mockCore, tc.expectedErr) - - // Create a pipeline - pipeline := NewPipeline(zerolog.Nop(), true, unittest.ExecutionResultFixture(), mockCore, publisher) - - // Start the pipeline - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error) - go func() { - errChan <- pipeline.Run(ctx) - }() - - // Send parent update to trigger processing - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateComplete, - }) - - // Wait for error - select { - case err := <-errChan: - assert.Error(t, err, "Pipeline should propagate the Core error") - assert.ErrorIs(t, err, tc.expectedErr) - case <-time.After(500 * time.Millisecond): - t.Fatal("Timeout waiting for error") - } - }) - } -} - -// TestBroadcastStateUpdate verifies that descendsFromSealed is correctly propagated -// and is based on the pipeline's state, not just forwarded from the parent. 
-func TestBroadcastStateUpdate(t *testing.T) { - // Create channels for state updates - updateChan := make(chan StateUpdate, 10) - - // Create publisher function - publisher := func(update StateUpdate) { - updateChan <- update - } - - // Create mock core - mockCore := osmock.NewCore(t) - mockCore.On("Download", mock.Anything).Return(nil) - mockCore.On("Index", mock.Anything).Return(nil) - - // Create a pipeline - pipeline := NewPipeline(zerolog.Nop(), false, unittest.ExecutionResultFixture(), mockCore, publisher) - - // Start the pipeline - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errChan := make(chan error) - go func() { - errChan <- pipeline.Run(ctx) - }() - - // Send a state update to trigger a broadcast to children - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: true, - ParentState: StateDownloading, - }) - - // Wait for an update to be sent to children - update := waitForStateUpdate(t, updateChan, StateWaitingPersist) - - // Check that the update has the correct flag - assert.True(t, update.DescendsFromLastPersistedSealed, "Initial update should indicate descends=true") - - // Now simulate this pipeline being canceled - pipeline.UpdateState(StateUpdate{ - DescendsFromLastPersistedSealed: false, // No longer descends - ParentState: StateReady, - }) - - // Wait for the pipeline to complete - select { - case err := <-errChan: - if err != nil { - assert.Contains(t, err.Error(), "abandoning due to parent updates", - "Error should indicate abandonment") - } else { - t.Log("Pipeline was canceled but returned nil error") - } - case <-time.After(500 * time.Millisecond): - t.Fatal("Timeout waiting for pipeline to complete") - } - - // Verify the pipeline transitioned to canceled state - assert.Equal(t, StateCanceled, pipeline.GetState(), "Pipeline should be in canceled state") - - // Drain the update channel to check if any updates indicate descended=false - // This approach uses a timeout to prevent hanging - canceledUpdateFound := false - drainTimeout := time.After(100 * time.Millisecond) - - for !canceledUpdateFound { - select { - case update := <-updateChan: - if !update.DescendsFromLastPersistedSealed && update.ParentState == StateCanceled { - canceledUpdateFound = true - } - case <-drainTimeout: - t.Log("No update with descendsFromSealed=false and state=canceled found within timeout") - return - } - } -} - -// Helper function to wait for a specific state update -func waitForStateUpdate(t *testing.T, updateChan <-chan StateUpdate, expectedState State) StateUpdate { - timeoutChan := time.After(500 * time.Millisecond) - - for { - select { - case update := <-updateChan: - if update.ParentState == expectedState { - return update - } - // Continue waiting if this isn't the state we're looking for - case <-timeoutChan: - t.Fatalf("Timed out waiting for state update to %s", expectedState) - return StateUpdate{} // Never reached, just to satisfy compiler - } - } -} diff --git a/module/executiondatasync/storage/datastore_factory.go b/module/executiondatasync/storage/datastore_factory.go new file mode 100644 index 00000000000..cb42c576cd2 --- /dev/null +++ b/module/executiondatasync/storage/datastore_factory.go @@ -0,0 +1,54 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + + badgerds "github.com/ipfs/go-ds-badger2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// CreateDatastoreManager creates a new datastore manager of the specified type. 
+// It supports both Badger and Pebble datastores. +func CreateDatastoreManager( + logger zerolog.Logger, + executionDataDir string, + executionDataDBModeStr string, +) (DatastoreManager, error) { + + // create the datastore directory if it does not exist + datastoreDir := filepath.Join(executionDataDir, "blobstore") + err := os.MkdirAll(datastoreDir, 0700) + if err != nil { + return nil, err + } + + // parse the execution data DB mode + executionDataDBMode, err := execution_data.ParseExecutionDataDBMode(executionDataDBModeStr) + if err != nil { + return nil, fmt.Errorf("could not parse execution data DB mode: %w", err) + } + + // create the appropriate datastore manager based on the DB mode + var executionDatastoreManager DatastoreManager + if executionDataDBMode == execution_data.ExecutionDataDBModePebble { + logger.Info().Msgf("Using Pebble datastore for execution data at %s", datastoreDir) + executionDatastoreManager, err = NewPebbleDatastoreManager( + logger.With().Str("pebbledb", "endata").Logger(), + datastoreDir, nil) + if err != nil { + return nil, fmt.Errorf("could not create PebbleDatastoreManager for execution data: %w", err) + } + } else { + logger.Info().Msgf("Using Badger datastore for execution data at %s", datastoreDir) + executionDatastoreManager, err = NewBadgerDatastoreManager(datastoreDir, &badgerds.DefaultOptions) + if err != nil { + return nil, fmt.Errorf("could not create BadgerDatastoreManager for execution data: %w", err) + } + } + + return executionDatastoreManager, nil +} diff --git a/module/executiondatasync/storage/pebble_datastore_manager.go b/module/executiondatasync/storage/pebble_datastore_manager.go index 2be4ed2a702..9ab6aa66ac4 100644 --- a/module/executiondatasync/storage/pebble_datastore_manager.go +++ b/module/executiondatasync/storage/pebble_datastore_manager.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" ds "github.com/ipfs/go-datastore" pebbleds "github.com/ipfs/go-ds-pebble" "github.com/rs/zerolog" @@ -44,7 +44,7 @@ func NewPebbleDatastoreManager(logger zerolog.Logger, path string, options *pebb return nil, fmt.Errorf("failed to open db: %w", err) } - ds, err := pebbleds.NewDatastore(path, options, pebbleds.WithPebbleDB(db)) + ds, err := pebbleds.NewDatastore(path, pebbleds.WithPebbleDB(db)) if err != nil { return nil, fmt.Errorf("could not open tracker ds: %w", err) } diff --git a/module/finalizedreader/finalizedreader_test.go b/module/finalizedreader/finalizedreader_test.go index e9a97133dc5..38d45e4993e 100644 --- a/module/finalizedreader/finalizedreader_test.go +++ b/module/finalizedreader/finalizedreader_test.go @@ -4,48 +4,66 @@ import ( "errors" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" ) func TestFinalizedReader(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() // prepare the storage.Headers instance metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, db) - block 
:= unittest.BlockFixture() + all := store.InitAll(metrics, db) + blocks := all.Blocks + headers := all.Headers + block1 := unittest.BlockFixture() - // store header - err := headers.Store(block.Header) - require.NoError(t, err) + // store `block1` + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, &block1) + }) + }) - // index the header - err = db.Update(operation.IndexBlockHeight(block.Header.Height, block.ID())) - require.NoError(t, err) + // finalize `block1` + unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, block1.Header.Height, block1.ID()) + }) + }) - // verify is able to reader the finalized block ID - reader := NewFinalizedReader(headers, block.Header.Height) - finalized, err := reader.FinalizedBlockIDAtHeight(block.Header.Height) + // verify that `FinalizedReader` reads values from database that are not yet cached, eg. right after initialization + reader := NewFinalizedReader(headers, block1.Header.Height) + finalized, err := reader.FinalizedBlockIDAtHeight(block1.Header.Height) require.NoError(t, err) - require.Equal(t, block.ID(), finalized) + require.Equal(t, block1.ID(), finalized) - // verify is able to return storage.NotFound when the height is not finalized - _, err = reader.FinalizedBlockIDAtHeight(block.Header.Height + 1) + // verify that `FinalizedReader` returns storage.NotFound when the height is not finalized + _, err = reader.FinalizedBlockIDAtHeight(block1.Header.Height + 1) require.Error(t, err) require.True(t, errors.Is(err, storage.ErrNotFound), err) - // finalize one more block - block2 := unittest.BlockWithParentFixture(block.Header) - require.NoError(t, headers.Store(block2.Header)) - err = db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) - require.NoError(t, err) + // store and finalize one more block + block2 := unittest.BlockWithParentFixture(block1.Header) + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, block2) + }) + }) + unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, block2.Header.Height, block2.ID()) + }) + }) + + // We declare `block2` as via the `FinalizedReader` reader.BlockFinalized(block2.Header) // should be able to retrieve the block @@ -53,7 +71,7 @@ func TestFinalizedReader(t *testing.T) { require.NoError(t, err) require.Equal(t, block2.ID(), finalized) - // should noop and no panic - reader.BlockProcessable(block.Header, block2.Header.QuorumCertificate()) + // repeated calls should be noop and no panic + reader.BlockProcessable(block1.Header, block2.Header.ParentQC()) }) } diff --git a/module/finalizer/collection/finalizer.go b/module/finalizer/collection/finalizer.go index e5b2e21904f..496035584e6 100644 --- a/module/finalizer/collection/finalizer.go +++ b/module/finalizer/collection/finalizer.go @@ -3,15 +3,16 @@ package collection import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" 
"github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) // Finalizer is a simple wrapper around our temporary state to clean up after a @@ -19,7 +20,8 @@ import ( // finalized collection from the mempool and updating the finalized boundary in // the cluster state. type Finalizer struct { - db *badger.DB + db storage.DB + lockManager lockctx.Manager transactions mempool.Transactions pusher collection.GuaranteedCollectionPublisher metrics module.CollectionMetrics @@ -27,13 +29,15 @@ type Finalizer struct { // NewFinalizer creates a new finalizer for collection nodes. func NewFinalizer( - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, transactions mempool.Transactions, pusher collection.GuaranteedCollectionPublisher, metrics module.CollectionMetrics, ) *Finalizer { f := &Finalizer{ db: db, + lockManager: lockManager, transactions: transactions, pusher: pusher, metrics: metrics, @@ -53,61 +57,69 @@ func NewFinalizer( // pools and persistent storage. // No errors are expected during normal operation. func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { - return operation.RetryOnConflict(f.db.Update, func(tx *badger.Txn) error { + // Acquire a lock for finalizing cluster blocks + lctx := f.lockManager.NewContext() + defer lctx.Release() + if err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock); err != nil { + return fmt.Errorf("could not acquire lock: %w", err) + } - // retrieve the header of the block we want to finalize - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } + reader := f.db.Reader() + // retrieve the header of the block we want to finalize + var header flow.Header + err := operation.RetrieveHeader(reader, blockID, &header) + if err != nil { + return fmt.Errorf("could not retrieve header: %w", err) + } - // retrieve the current finalized cluster state boundary - var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(header.ChainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } + // retrieve the current finalized cluster state boundary + var boundary uint64 + err = operation.RetrieveClusterFinalizedHeight(reader, header.ChainID, &boundary) + if err != nil { + return fmt.Errorf("could not retrieve boundary: %w", err) + } - // retrieve the ID of the last finalized block as marker for stopping - var headID flow.Identifier - err = operation.LookupClusterBlockHeight(header.ChainID, boundary, &headID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve head: %w", err) - } + // retrieve the ID of the last finalized block as marker for stopping + var headID flow.Identifier + err = operation.LookupClusterBlockHeight(reader, header.ChainID, boundary, &headID) + if err != nil { + return fmt.Errorf("could not retrieve head: %w", err) + } - // there are no blocks to finalize, we may have already finalized - // this block - exit early - if boundary >= header.Height { - return nil - } + // there are no blocks to finalize, we may have already finalized + // this block - exit early + if 
boundary >= header.Height { + return nil + } - // To finalize all blocks from the currently finalized one up to and - // including the current, we first enumerate each of these blocks. - // We start at the youngest block and remember all visited blocks, - // while tracing back until we reach the finalized state - steps := []*flow.Header{&header} - parentID := header.ParentID - for parentID != headID { - var parent flow.Header - err = operation.RetrieveHeader(parentID, &parent)(tx) - if err != nil { - return fmt.Errorf("could not retrieve parent (%x): %w", parentID, err) - } - steps = append(steps, &parent) - parentID = parent.ParentID + // To finalize all blocks from the currently finalized one up to and + // including the current, we first enumerate each of these blocks. + // We start at the youngest block and remember all visited blocks, + // while tracing back until we reach the finalized state + steps := []*flow.Header{&header} + parentID := header.ParentID + for parentID != headID { + var parent flow.Header + err = operation.RetrieveHeader(reader, parentID, &parent) + if err != nil { + return fmt.Errorf("could not retrieve parent (%x): %w", parentID, err) } + steps = append(steps, &parent) + parentID = parent.ParentID + } - // now we can step backwards in order to go from oldest to youngest; for - // each header, we reconstruct the block and then apply the related - // changes to the protocol state - for i := len(steps) - 1; i >= 0; i-- { + // now we can step backwards in order to go from oldest to youngest; for + // each header, we reconstruct the block and then apply the related + // changes to the protocol state + for i := len(steps) - 1; i >= 0; i-- { + err := f.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { clusterBlockID := steps[i].ID() // look up the transactions included in the payload step := steps[i] var payload cluster.Payload - err = procedure.RetrieveClusterPayload(clusterBlockID, &payload)(tx) + // This does not require a lock, as a block's payload once set never changes. + err = procedure.RetrieveClusterPayload(rw.GlobalReader(), clusterBlockID, &payload) if err != nil { return fmt.Errorf("could not retrieve payload for cluster block (id=%x): %w", clusterBlockID, err) } @@ -120,7 +132,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { } // finalize the block in cluster state - err = procedure.FinalizeClusterBlock(clusterBlockID)(tx) + err = procedure.FinalizeClusterBlock(lctx, rw, clusterBlockID) if err != nil { return fmt.Errorf("could not finalize cluster block (id=%x): %w", clusterBlockID, err) } @@ -134,17 +146,18 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // if the finalized collection is empty, we don't need to include it // in the reference height index or submit it to consensus nodes if len(payload.Collection.Transactions) == 0 { - continue + return nil } // look up the reference block height to populate index var refBlock flow.Header - err = operation.RetrieveHeader(payload.ReferenceBlockID, &refBlock)(tx) + // This does not require a lock, as a block's header once set never changes. 
+ err = operation.RetrieveHeader(rw.GlobalReader(), payload.ReferenceBlockID, &refBlock) if err != nil { return fmt.Errorf("could not retrieve reference block (id=%x): %w", payload.ReferenceBlockID, err) } // index the finalized cluster block by reference block height - err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, clusterBlockID)(tx) + err = operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, clusterBlockID) if err != nil { return fmt.Errorf("could not index cluster block (id=%x) by reference height (%d): %w", clusterBlockID, refBlock.Height, err) } @@ -158,16 +171,24 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // For now, we just use the parent signers as the guarantors of this // collection. - // TODO add real signatures here (https://github.com/onflow/flow-go-internal/issues/4569) - f.pusher.SubmitCollectionGuarantee(&flow.CollectionGuarantee{ - CollectionID: payload.Collection.ID(), - ReferenceBlockID: payload.ReferenceBlockID, - ChainID: header.ChainID, - SignerIndices: step.ParentVoterIndices, - Signature: nil, // TODO: to remove because it's not easily verifiable by consensus nodes + // only submit the collection guarantee if the write is successful + storage.OnCommitSucceed(rw, func() { + // TODO add real signatures here (https://github.com/onflow/flow-go-internal/issues/4569) + f.pusher.SubmitCollectionGuarantee(&flow.CollectionGuarantee{ + CollectionID: payload.Collection.ID(), + ReferenceBlockID: payload.ReferenceBlockID, + ChainID: header.ChainID, + SignerIndices: step.ParentVoterIndices, + Signature: nil, // TODO: to remove because it's not easily verifiable by consensus nodes + }) }) + + return nil + }) + if err != nil { + return fmt.Errorf("could not finalize cluster block (%x): %w", steps[i].ID(), err) } + } - return nil - }) + return nil } diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index 25cd280e6ca..fa8cd93e5de 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -15,28 +16,33 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" cluster "github.com/onflow/flow-go/state/cluster/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/procedure" "github.com/onflow/flow-go/utils/unittest" ) func TestFinalizer(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + // This test has to build on top of badgerdb, because the cleanup method depends + // on the badgerdb.DropAll method to wipe the database, which pebble does not support. 
+ unittest.RunWithBadgerDB(t, func(badgerdb *badger.DB) { + lockManager := storage.NewTestingLockManager() + db := badgerimpl.ToDB(badgerdb) // reference block on the main consensus chain refBlock := unittest.BlockHeaderFixture() // genesis block for the cluster chain genesis := model.Genesis() metrics := metrics.NewNoopCollector() + pool := herocache.NewTransactions(1000, unittest.Logger(), metrics) var state *cluster.State - pool := herocache.NewTransactions(1000, unittest.Logger(), metrics) - // a helper function to clean up shared state between tests cleanup := func() { // wipe the DB - err := db.DropAll() + err := badgerdb.DropAll() require.NoError(t, err) // clear the mempool for _, tx := range pool.All() { @@ -48,15 +54,26 @@ func TestFinalizer(t *testing.T) { bootstrap := func() { stateRoot, err := cluster.NewStateRoot(genesis, unittest.QuorumCertificateFixture(), 0) require.NoError(t, err) - state, err = cluster.Bootstrap(db, stateRoot) + state, err = cluster.Bootstrap(db, lockManager, stateRoot) require.NoError(t, err) - err = db.Update(operation.InsertHeader(refBlock.ID(), refBlock)) + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, refBlock.ID(), refBlock) + }) require.NoError(t, err) + lctx.Release() } // a helper function to insert a block - insert := func(block model.Block) { - err := db.Update(procedure.InsertClusterBlock(&block)) + insert := func(db storage.DB, lockManager lockctx.Manager, block model.Block) { + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &block) + }) assert.NoError(t, err) } @@ -65,7 +82,7 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) fakeBlockID := unittest.IdentifierFixture() err := finalizer.MakeFinal(fakeBlockID) @@ -78,7 +95,7 @@ func TestFinalizer(t *testing.T) { pusher := collectionmock.NewGuaranteedCollectionPublisher(t) pusher.On("SubmitCollectionGuarantee", mock.Anything).Once() - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) @@ -87,7 +104,7 @@ func TestFinalizer(t *testing.T) { // create a new block on genesis block := unittest.ClusterBlockWithParent(genesis) block.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block) + insert(db, lockManager, block) // finalize the block err := finalizer.MakeFinal(block.ID()) @@ -103,13 +120,13 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // create a new block that isn't connected to a parent block := unittest.ClusterBlockWithParent(genesis) block.Header.ParentID = unittest.IdentifierFixture() 
block.SetPayload(model.EmptyPayload(refBlock.ID())) - insert(block) + insert(db, lockManager, block) // try to finalize - this should fail err := finalizer.MakeFinal(block.ID()) @@ -121,12 +138,12 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // create a block with empty payload on genesis block := unittest.ClusterBlockWithParent(genesis) block.SetPayload(model.EmptyPayload(refBlock.ID())) - insert(block) + insert(db, lockManager, block) // finalize the block err := finalizer.MakeFinal(block.ID()) @@ -138,7 +155,6 @@ func TestFinalizer(t *testing.T) { assert.Equal(t, block.ID(), final.ID()) // collection should not have been propagated - pusher.AssertNotCalled(t, "SubmitCollectionGuarantee", mock.Anything) }) t.Run("finalize single block", func(t *testing.T) { @@ -146,7 +162,7 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) @@ -158,7 +174,7 @@ func TestFinalizer(t *testing.T) { // create a block containing tx1 on top of genesis block := unittest.ClusterBlockWithParent(genesis) block.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block) + insert(db, lockManager, block) // block should be passed to pusher pusher.On("SubmitCollectionGuarantee", &flow.CollectionGuarantee{ @@ -182,7 +198,7 @@ func TestFinalizer(t *testing.T) { final, err := state.Final().Head() assert.NoError(t, err) assert.Equal(t, block.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, final.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, final.ID()) }) // when finalizing a block with un-finalized ancestors, those ancestors should be finalized as well @@ -191,7 +207,7 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the first finalized block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) @@ -203,12 +219,12 @@ func TestFinalizer(t *testing.T) { // create a block containing tx1 on top of genesis block1 := unittest.ClusterBlockWithParent(genesis) block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) + insert(db, lockManager, block1) // create a block containing tx2 on top of block1 block2 := unittest.ClusterBlockWithParent(&block1) block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + insert(db, lockManager, block2) // both blocks should be passed to pusher pusher.On("SubmitCollectionGuarantee", &flow.CollectionGuarantee{ @@ -238,7 +254,7 @@ func TestFinalizer(t *testing.T) { final, err := state.Final().Head() assert.NoError(t, err) assert.Equal(t, block2.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID(), block2.ID()) + 
assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID(), block2.ID()) }) t.Run("finalize with un-finalized child", func(t *testing.T) { @@ -246,7 +262,7 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized parent block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) @@ -258,12 +274,12 @@ func TestFinalizer(t *testing.T) { // create a block containing tx1 on top of genesis block1 := unittest.ClusterBlockWithParent(genesis) block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) + insert(db, lockManager, block1) // create a block containing tx2 on top of block1 block2 := unittest.ClusterBlockWithParent(&block1) block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + insert(db, lockManager, block2) // block should be passed to pusher pusher.On("SubmitCollectionGuarantee", &flow.CollectionGuarantee{ @@ -287,7 +303,7 @@ func TestFinalizer(t *testing.T) { final, err := state.Final().Head() assert.NoError(t, err) assert.Equal(t, block1.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID()) }) // when finalizing a block with a conflicting fork, the fork should not be finalized. @@ -296,7 +312,7 @@ func TestFinalizer(t *testing.T) { defer cleanup() pusher := collectionmock.NewGuaranteedCollectionPublisher(t) - finalizer := collection.NewFinalizer(db, pool, pusher, metrics) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) @@ -308,12 +324,12 @@ func TestFinalizer(t *testing.T) { // create a block containing tx1 on top of genesis block1 := unittest.ClusterBlockWithParent(genesis) block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) + insert(db, lockManager, block1) // create a block containing tx2 on top of genesis (conflicting with block1) block2 := unittest.ClusterBlockWithParent(genesis) block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + insert(db, lockManager, block2) // block should be passed to pusher pusher.On("SubmitCollectionGuarantee", &flow.CollectionGuarantee{ @@ -337,7 +353,7 @@ func TestFinalizer(t *testing.T) { final, err := state.Final().Head() assert.NoError(t, err) assert.Equal(t, block1.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID()) }) }) } @@ -345,9 +361,12 @@ func TestFinalizer(t *testing.T) { // assertClusterBlocksIndexedByReferenceHeight checks the given cluster blocks have // been indexed by the given reference block height, which is expected as part of // finalization. 
-func assertClusterBlocksIndexedByReferenceHeight(t *testing.T, db *badger.DB, refHeight uint64, clusterBlockIDs ...flow.Identifier) { +func assertClusterBlocksIndexedByReferenceHeight(t *testing.T, lockManager lockctx.Manager, db storage.DB, refHeight uint64, clusterBlockIDs ...flow.Identifier) { var ids []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(refHeight, refHeight, &ids)) + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), refHeight, refHeight, &ids) require.NoError(t, err) assert.ElementsMatch(t, clusterBlockIDs, ids) } diff --git a/module/finalizer/consensus/finalizer.go b/module/finalizer/consensus/finalizer.go index 6bf8bfdf2dd..5caff16d08e 100644 --- a/module/finalizer/consensus/finalizer.go +++ b/module/finalizer/consensus/finalizer.go @@ -4,38 +4,36 @@ import ( "context" "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" ) // Finalizer is a simple wrapper around our temporary state to clean up after a // block has been fully finalized to the persistent protocol state. type Finalizer struct { - db *badger.DB - headers storage.Headers - state protocol.FollowerState - cleanup CleanupFunc - tracer module.Tracer + dbReader storage.Reader + headers storage.Headers + state protocol.FollowerState + cleanup CleanupFunc + tracer module.Tracer } // NewFinalizer creates a new finalizer for the temporary state. -func NewFinalizer(db *badger.DB, +func NewFinalizer(dbReader storage.Reader, headers storage.Headers, state protocol.FollowerState, tracer module.Tracer, options ...func(*Finalizer)) *Finalizer { f := &Finalizer{ - db: db, - state: state, - headers: headers, - cleanup: CleanupNothing(), - tracer: tracer, + dbReader: dbReader, + state: state, + headers: headers, + cleanup: CleanupNothing(), + tracer: tracer, } for _, option := range options { option(f) @@ -62,7 +60,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // that height, it's an invalid operation. Otherwise, it is a no-op. var finalized uint64 - err := f.db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(f.dbReader, &finalized) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } @@ -89,7 +87,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // back to the last finalized block, this is also an invalid call. 
var finalID flow.Identifier - err = f.db.View(operation.LookupBlockHeight(finalized, &finalID)) + err = operation.LookupBlockHeight(f.dbReader, finalized, &finalID) if err != nil { return fmt.Errorf("could not retrieve finalized header: %w", err) } diff --git a/module/finalizer/consensus/finalizer_test.go b/module/finalizer/consensus/finalizer_test.go index 35b20705ec4..9b895d94717 100644 --- a/module/finalizer/consensus/finalizer_test.go +++ b/module/finalizer/consensus/finalizer_test.go @@ -13,9 +13,10 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" mockprot "github.com/onflow/flow-go/state/protocol/mock" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - mockstor "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,18 +27,6 @@ func LogCleanup(list *[]flow.Identifier) func(flow.Identifier) error { } } -func TestNewFinalizer(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - headers := &mockstor.Headers{} - state := &mockprot.FollowerState{} - tracer := trace.NewNoopTracer() - fin := NewFinalizer(db, headers, state, tracer) - assert.Equal(t, fin.db, db) - assert.Equal(t, fin.headers, headers) - assert.Equal(t, fin.state, state) - }) -} - // TestMakeFinalValidChain checks whether calling `MakeFinal` with the ID of a valid // descendant block of the latest finalized header results in the finalization of the // valid descendant and all of its parents up to the finalized header, but excluding @@ -75,33 +64,55 @@ func TestMakeFinalValidChain(t *testing.T) { var list []flow.Identifier unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbImpl := badgerimpl.ToDB(db) + + // set up lock context + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, err) // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height) + }) require.NoError(t, err) // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + }) require.NoError(t, err) + lctx.Release() // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + insertLctx := lockManager.NewContext() + require.NoError(t, insertLctx.AcquireLock(storage.LockInsertBlock)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, final.ID(), final) + }) require.NoError(t, err) + insertLctx.Release() // insert all of the pending blocks into the DB for _, header := range pending { - err = db.Update(operation.InsertHeader(header.ID(), header)) + insertLctx2 := lockManager.NewContext() + require.NoError(t, insertLctx2.AcquireLock(storage.LockInsertBlock)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return 
operation.InsertHeader(insertLctx2, rw, header.ID(), header) + }) require.NoError(t, err) + insertLctx2.Release() } // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: badgerimpl.ToDB(db).Reader(), + headers: store.NewHeaders(metrics, badgerimpl.ToDB(db)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(lastID) require.NoError(t, err) @@ -133,31 +144,53 @@ func TestMakeFinalInvalidHeight(t *testing.T) { var list []flow.Identifier unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbImpl := badgerimpl.ToDB(db) + + // set up lock context + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, err) // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height) + }) require.NoError(t, err) // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + }) require.NoError(t, err) + lctx.Release() // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + insertLctx := lockManager.NewContext() + require.NoError(t, insertLctx.AcquireLock(storage.LockInsertBlock)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, final.ID(), final) + }) require.NoError(t, err) + insertLctx.Release() // insert all of the pending header into DB - err = db.Update(operation.InsertHeader(pending.ID(), pending)) + insertLctx = lockManager.NewContext() + require.NoError(t, insertLctx.AcquireLock(storage.LockInsertBlock)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, pending.ID(), pending) + }) require.NoError(t, err) + insertLctx.Release() // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: badgerimpl.ToDB(db).Reader(), + headers: store.NewHeaders(metrics, badgerimpl.ToDB(db)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(pending.ID()) require.Error(t, err) @@ -185,27 +218,43 @@ func TestMakeFinalDuplicate(t *testing.T) { var list []flow.Identifier unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbImpl := badgerimpl.ToDB(db) + // set up lock context + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, err) // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), 
final.Height) + }) require.NoError(t, err) // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + }) require.NoError(t, err) + lctx.Release() // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + insertLctx := lockManager.NewContext() + require.NoError(t, insertLctx.AcquireLock(storage.LockInsertBlock)) + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, final.ID(), final) + }) require.NoError(t, err) + insertLctx.Release() // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: badgerimpl.ToDB(db).Reader(), + headers: store.NewHeaders(metrics, badgerimpl.ToDB(db)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(final.ID()) require.NoError(t, err) diff --git a/module/metrics/labels.go b/module/metrics/labels.go index aed6ef64b4a..3d595c812b0 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -52,6 +52,7 @@ const ( ResourceProposal = "proposal" ResourceHeader = "header" ResourceFinalizedHeight = "finalized_height" + ResourceCertifiedView = "certified_view" ResourceIndex = "index" ResourceIdentity = "identity" ResourceGuarantee = "guarantee" diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go index c32b6a40465..c72b3690ea8 100644 --- a/module/pruner/pruners/chunk_data_pack_test.go +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -5,13 +5,13 @@ import ( "fmt" "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - storagebadger "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" @@ -22,9 +22,10 @@ func TestChunkDataPackPruner(t *testing.T) { unittest.RunWithBadgerDB(t, func(badgerDB *badger.DB) { unittest.RunWithPebbleDB(t, func(pebbleDB *pebble.DB) { m := metrics.NewNoopCollector() - results := storagebadger.NewExecutionResults(m, badgerDB) - transactions := storagebadger.NewTransactions(m, badgerDB) - collections := storagebadger.NewCollections(badgerDB, transactions) + db := badgerimpl.ToDB(badgerDB) + results := store.NewExecutionResults(m, db) + transactions := store.NewTransactions(m, db) + collections := store.NewCollections(db, transactions) byChunkIDCacheSize := uint(10) pdb := pebbleimpl.ToDB(pebbleDB) chunks := store.NewChunkDataPacks(m, pdb, collections, byChunkIDCacheSize) diff --git a/module/queue/concurrent_priority_queue.go b/module/queue/concurrent_priority_queue.go new file mode 100644 index 00000000000..9f2c53be39b --- /dev/null +++ b/module/queue/concurrent_priority_queue.go @@ -0,0 +1,75 @@ +package queue + +import ( + "container/heap" + "sync" + + "github.com/onflow/flow-go/engine" +) + +// 
ConcurrentPriorityQueue is a thread-safe priority queue that provides a channel-based notification +// mechanism when items are inserted. +// All methods are safe for concurrent access. +type ConcurrentPriorityQueue[T any] struct { + queue PriorityQueue[T] + smallerValuesFirst bool + notifier engine.Notifier + mu sync.RWMutex +} + +// NewConcurrentPriorityQueue creates a new instance of ConcurrentPriorityQueue. +// If smallerValuesFirst is true, inverts the priority so items with lower values take precedence. +func NewConcurrentPriorityQueue[T any](smallerValuesFirst bool) *ConcurrentPriorityQueue[T] { + return &ConcurrentPriorityQueue[T]{ + queue: PriorityQueue[T]{}, + smallerValuesFirst: smallerValuesFirst, + notifier: engine.NewNotifier(), + } +} + +// Len returns the number of items currently in the queue. +func (mq *ConcurrentPriorityQueue[T]) Len() int { + mq.mu.RLock() + defer mq.mu.RUnlock() + + return mq.queue.Len() +} + +// Push adds a new item to the queue with the specified priority. +// A notification is sent on the channel if it's not already full. +func (mq *ConcurrentPriorityQueue[T]) Push(item T, priority uint64) { + mq.mu.Lock() + defer mq.mu.Unlock() + + // if smaller values are higher priority, invert the priority value since the heap will always + // return the largest value first. + if mq.smallerValuesFirst { + priority = ^priority + } + + heap.Push(&mq.queue, NewPriorityQueueItem(item, priority)) + + mq.notifier.Notify() +} + +// Pop removes and immediately returns the highest priority item from the queue. +// If the queue is empty, false is returned. +// If multiple items have the same priority, the oldest one by insertion time is returned. +func (mq *ConcurrentPriorityQueue[T]) Pop() (T, bool) { + mq.mu.Lock() + defer mq.mu.Unlock() + + if mq.queue.Len() == 0 { + var nilT T + return nilT, false + } + + item := heap.Pop(&mq.queue).(*PriorityQueueItem[T]) + return item.Message(), true +} + +// Channel returns a signal channel that receives a signal when an item is inserted. +// This allows consumers to be notified of new items without polling. 
+func (mq *ConcurrentPriorityQueue[T]) Channel() <-chan struct{} { + return mq.notifier.Channel() +} diff --git a/module/queue/concurrent_priority_queue_test.go b/module/queue/concurrent_priority_queue_test.go new file mode 100644 index 00000000000..734295da80d --- /dev/null +++ b/module/queue/concurrent_priority_queue_test.go @@ -0,0 +1,538 @@ +package queue + +import ( + "context" + "fmt" + "math" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewConcurrentPriorityQueue tests the constructor +func TestNewConcurrentPriorityQueue(t *testing.T) { + t.Run("creates queue with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + assert.NotNil(t, mq) + assert.NotNil(t, mq.queue) + assert.False(t, mq.smallerValuesFirst) + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("creates queue with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + assert.NotNil(t, mq) + assert.NotNil(t, mq.queue) + assert.True(t, mq.smallerValuesFirst) + assert.Equal(t, 0, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_Len tests the Len method +func TestConcurrentPriorityQueue_Len(t *testing.T) { + t.Run("empty queue returns 0", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("queue with items returns correct length", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + mq.Push("item3", 3) + + assert.Equal(t, 3, mq.Len()) + }) + + t.Run("length decreases after pop", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + + assert.Equal(t, 2, mq.Len()) + + _, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, 1, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_Push tests the Push method +func TestConcurrentPriorityQueue_Push(t *testing.T) { + t.Run("push adds items with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("low", 1) + mq.Push("high", 10) + mq.Push("medium", 5) + + assert.Equal(t, 3, mq.Len()) + + // Pop items and verify they come out in priority order + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "medium", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "low", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("push adds items with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("high", 10) + mq.Push("low", 1) + mq.Push("medium", 5) + + assert.Equal(t, 3, mq.Len()) + + // Pop items and verify they come out in priority order (smaller first) + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "low", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "medium", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("push with zero priority", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("zero", 0) + mq.Push("high", 100) + + // Zero priority should come last when larger values are first + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "zero", item2) 
+ }) + + t.Run("push with zero priority and smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("high", 100) + mq.Push("zero", 0) + + // Zero priority should come first when smaller values are first + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "zero", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item2) + }) +} + +// TestConcurrentPriorityQueue_Pop tests the Pop method +func TestConcurrentPriorityQueue_Pop(t *testing.T) { + t.Run("pop on empty queue returns false", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + item, ok := mq.Pop() + assert.False(t, ok) + var zero string + assert.Equal(t, zero, item) + }) + + t.Run("pop returns items in priority order", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item3", 3) + mq.Push("item2", 2) + + // Should come out in descending priority order + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item3", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item2", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item1", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("pop with equal priorities uses timestamp ordering", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + // Add items with same priority but different timestamps + mq.Push("first", 5) + time.Sleep(time.Millisecond) + mq.Push("second", 5) + time.Sleep(time.Millisecond) + mq.Push("third", 5) + + // Should come out in insertion order (oldest first) + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "first", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "second", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "third", item3) + }) +} + +// TestConcurrentPriorityQueue_Channel tests the Channel method +func TestConcurrentPriorityQueue_Channel(t *testing.T) { + t.Run("channel receives notification on push", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push an item + mq.Push("test", 1) + + // Should receive notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) + + t.Run("channel does not block on multiple pushes", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push multiple items rapidly + for i := 0; i < 10; i++ { + mq.Push("test", uint64(i)) + } + + // Should receive at least one notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + + // Channel should be buffered and not block + assert.Equal(t, 10, mq.Len()) + }) + + t.Run("channel is buffered", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push multiple items without reading from channel + for i := 0; i < 5; i++ { + mq.Push("test", uint64(i)) + } + + // Should not block + assert.Equal(t, 5, mq.Len()) + + // Should be able to read from channel + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) +} + +// TestConcurrentPriorityQueue_Concurrency tests thread safety +func TestConcurrentPriorityQueue_Concurrency(t *testing.T) { + t.Run("concurrent 
push operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + + // Start multiple goroutines pushing items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + wg.Wait() + + // Verify all items were added + assert.Equal(t, numGoroutines, mq.Len()) + + // Verify items can be popped correctly + popped := make(map[int]bool) + for i := 0; i < numGoroutines; i++ { + item, ok := mq.Pop() + assert.True(t, ok) + assert.False(t, popped[item], "duplicate item popped: %d", item) + popped[item] = true + } + + assert.Equal(t, numGoroutines, len(popped)) + }) + + t.Run("concurrent push and pop operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + var mu sync.Mutex + popped := make(map[int]bool) + + // Start goroutines that push items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + // Wait for all pushes to complete + wg.Wait() + + // Now start goroutines that pop items + var popWg sync.WaitGroup + for i := 0; i < numGoroutines/2; i++ { + popWg.Add(1) + go func() { + defer popWg.Done() + for { + item, ok := mq.Pop() + if !ok { + break + } + mu.Lock() + assert.False(t, popped[item], "duplicate item popped: %d", item) + popped[item] = true + mu.Unlock() + } + }() + } + + popWg.Wait() + + // Verify no duplicates and all items were processed + assert.Equal(t, numGoroutines, len(popped)) + }) + + t.Run("concurrent len operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + + // Start goroutines that push items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + // Start goroutines that call Len + for i := 0; i < numGoroutines/2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 100; j++ { + _ = mq.Len() + } + }() + } + + wg.Wait() + + // Verify final length + assert.Equal(t, numGoroutines, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_EdgeCases tests edge cases +func TestConcurrentPriorityQueue_EdgeCases(t *testing.T) { + t.Run("max uint64 priority with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("normal", 1000) + mq.Push("max", math.MaxUint64) + + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "max", item) + }) + + t.Run("max uint64 priority with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("normal", 1000) + mq.Push("max", math.MaxUint64) + + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "normal", item) // max priority becomes 0 after inversion + }) + + t.Run("empty queue after popping all items", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + + assert.Equal(t, 2, mq.Len()) + + _, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, 1, mq.Len()) + + _, ok = mq.Pop() + assert.True(t, ok) + assert.Equal(t, 0, mq.Len()) + + _, ok = mq.Pop() + assert.False(t, ok) + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("channel notification after queue becomes empty", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + mq.Push("item", 1) + + // Read the notification + <-ch + + // Pop 
the item + _, ok := mq.Pop() + assert.True(t, ok) + + // Push another item + mq.Push("item2", 2) + + // Should receive another notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) +} + +// TestConcurrentPriorityQueue_Integration tests integration scenarios +func TestConcurrentPriorityQueue_Integration(t *testing.T) { + t.Run("mixed operations with different priorities", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + // Add items with various priorities + mq.Push("urgent", 100) + mq.Push("normal", 50) + mq.Push("low", 10) + mq.Push("critical", 200) + mq.Push("medium", 75) + + // Pop all items and verify order + expected := []string{"critical", "urgent", "medium", "normal", "low"} + for _, exp := range expected { + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, exp, item) + } + + _, ok := mq.Pop() + assert.False(t, ok) + }) + + t.Run("priority inversion with mixed operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + // Add items with various priorities + mq.Push("urgent", 100) + mq.Push("normal", 50) + mq.Push("low", 10) + mq.Push("critical", 200) + mq.Push("medium", 75) + + // Pop all items and verify order (smaller values first) + expected := []string{"low", "normal", "medium", "urgent", "critical"} + for _, exp := range expected { + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, exp, item) + } + + _, ok := mq.Pop() + assert.False(t, ok) + }) + + t.Run("queue processing using channel", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mq := NewConcurrentPriorityQueue[string](true) + + itemCount := 100 + go func() { + for i := range itemCount { + mq.Push(fmt.Sprintf("item-%d", i), uint64(i)) + } + }() + + unittest.RequireReturnsBefore(t, func() { + for i := 0; i < itemCount; { + select { + case <-ctx.Done(): + return + case <-mq.Channel(): + } + + for { + message, ok := mq.Pop() + if !ok { + break + } + assert.Equal(t, fmt.Sprintf("item-%d", i), message) + i++ + } + } + }, time.Second, "did not receive all messages within timeout") + + // make sure the queue is empty + assert.Zero(t, mq.Len()) + }) +} diff --git a/module/queue/priority_queue.go b/module/queue/priority_queue.go new file mode 100644 index 00000000000..75a330e7b30 --- /dev/null +++ b/module/queue/priority_queue.go @@ -0,0 +1,107 @@ +package queue + +import ( + "container/heap" + "fmt" + "time" +) + +// PriorityQueueItem is a generic item in the priority queue. +// Each item contains a message, priority value, and metadata for queue management. +// PriorityQueueItems are immutable once created and safe for concurrent access. +type PriorityQueueItem[T any] struct { + // message is the actual item in the queue. + message T + + // priority is the priority of the item in the queue. + // Larger priority values are dequeued first. + priority uint64 + + // index is the index of the item in the heap. + // The index is required by update() and is maintained by the heap.Interface methods. + index int + + // timestamp to maintain insertions order for items with the same priority and for telemetry + timestamp time.Time +} + +// NewPriorityQueueItem creates a new PriorityQueueItem with the given message and priority. 
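+//
+// An illustrative sketch of the intended call pattern (hypothetical names, assuming a
+// *PriorityQueue[string] named pq that is driven through the standard container/heap
+// package, as exercised in priority_queue_test.go):
+//
+//	item := NewPriorityQueueItem("payload", 7)
+//	heap.Push(pq, item)
+//	next := heap.Pop(pq).(*PriorityQueueItem[string])
+//	_ = next.Message() // "payload"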
+func NewPriorityQueueItem[T any](message T, priority uint64) *PriorityQueueItem[T] { + return &PriorityQueueItem[T]{ + message: message, + priority: priority, + index: -1, // index is set when the item is pushed to the heap + timestamp: time.Now(), + } +} + +// Message returns the message stored in the item. +func (item *PriorityQueueItem[T]) Message() T { + return item.message +} + +var _ heap.Interface = (*PriorityQueue[any])(nil) + +// PriorityQueue implements heap.Interface and holds PriorityQueueItems. +// It provides a priority queue where items with larger priority values +// are dequeued first. For items with equal priority, the oldest item (by insertion time) +// is dequeued first. +// CAUTION: not concurrency safe! Caller must implement their own synchronization. +type PriorityQueue[T any] []*PriorityQueueItem[T] + +// Len returns the number of items in the priority queue. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Len() int { return len(pq) } + +// Less determines the ordering of items in the priority queue. +// PriorityQueueItems with larger priority values come first. For items with equal priority, +// the oldest item (by insertion timestamp) comes first. +// Returns true if and only if item at index i should come before item at index j. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Less(i, j int) bool { + // We want Pop to give us the highest, not lowest, priority so we use greater than here. + if pq[i].priority > pq[j].priority { + return true + } + if pq[i].priority < pq[j].priority { + return false + } + // if both items have the same priority, then pop the oldest + return pq[i].timestamp.Before(pq[j].timestamp) +} + +// Swap exchanges the items at the given indices and updates their heap indices. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the priority queue. +// The item's index is automatically set to its position in the heap. +// The item must be of type `*PriorityQueueItem[T]` otherwise the method will panic. +// CAUTION: not concurrency safe! +func (pq *PriorityQueue[T]) Push(x any) { + n := len(*pq) + item, ok := x.(*PriorityQueueItem[T]) + if !ok { + panic(fmt.Sprintf("unexpected type added to priority queue: %T", x)) + } + item.index = n + *pq = append(*pq, item) +} + +// Pop removes and returns the highest priority item from the queue. +// The returned item will have the highest priority value, or if multiple items +// have the same priority, the oldest one by insertion time. +// CAUTION: not concurrency safe! 
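+// NOTE: this method is part of the heap.Interface implementation and is meant to be
+// invoked via heap.Pop, which first swaps the highest-priority item to the end of the
+// slice; calling it directly only removes and returns the last element of the slice.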
+func (pq *PriorityQueue[T]) Pop() any { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} diff --git a/module/queue/priority_queue_test.go b/module/queue/priority_queue_test.go new file mode 100644 index 00000000000..c7c8b65d025 --- /dev/null +++ b/module/queue/priority_queue_test.go @@ -0,0 +1,337 @@ +package queue + +import ( + "container/heap" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestPriorityQueueItem tests the PriorityQueueItem struct and its methods +func TestPriorityQueueItem(t *testing.T) { + t.Run("NewPriorityQueueItem creates item with correct values", func(t *testing.T) { + message := "test message" + priority := uint64(42) + + item := NewPriorityQueueItem(message, priority) + + assert.Equal(t, message, item.message) + assert.Equal(t, priority, item.priority) + assert.Equal(t, -1, item.index) + assert.WithinDuration(t, time.Now(), item.timestamp, time.Minute) + }) + + t.Run("Message returns the stored message", func(t *testing.T) { + message := "test message" + item := NewPriorityQueueItem(message, 1) + + result := item.Message() + + assert.Equal(t, message, result) + }) +} + +// TestPriorityQueue_Len tests the Len method +func TestPriorityQueue_Len(t *testing.T) { + t.Run("empty queue has length 0", func(t *testing.T) { + pq := PriorityQueue[string]{} + + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("queue with items has correct length", func(t *testing.T) { + pq := PriorityQueue[string]{ + NewPriorityQueueItem("item1", 1), + NewPriorityQueueItem("item2", 2), + NewPriorityQueueItem("item3", 3), + } + + assert.Equal(t, 3, pq.Len()) + }) +} + +// TestPriorityQueue_Less tests the Less method for priority ordering +func TestPriorityQueue_Less(t *testing.T) { + t.Run("higher priority comes first", func(t *testing.T) { + pq := PriorityQueue[string]{ + NewPriorityQueueItem("low", 1), + NewPriorityQueueItem("high", 10), + } + + // high priority should be "less" (come first in heap) + assert.True(t, pq.Less(1, 0)) + assert.False(t, pq.Less(0, 1)) + }) + + t.Run("equal priority uses timestamp ordering", func(t *testing.T) { + now := time.Now() + item1 := &PriorityQueueItem[string]{ + message: "first", + priority: 5, + timestamp: now, + } + item2 := &PriorityQueueItem[string]{ + message: "second", + priority: 5, + timestamp: now.Add(time.Millisecond), + } + + pq := PriorityQueue[string]{item1, item2} + + // older timestamp should be "less" (come first in heap) + assert.True(t, pq.Less(0, 1)) + assert.False(t, pq.Less(1, 0)) + }) + + t.Run("same priority and timestamp", func(t *testing.T) { + now := time.Now() + item1 := &PriorityQueueItem[string]{ + message: "item1", + priority: 5, + timestamp: now, + } + item2 := &PriorityQueueItem[string]{ + message: "item2", + priority: 5, + timestamp: now, + } + + pq := PriorityQueue[string]{item1, item2} + + // Should be consistent (not less) + assert.False(t, pq.Less(0, 1)) + assert.False(t, pq.Less(1, 0)) + }) +} + +// TestPriorityQueue_Swap tests the Swap method +func TestPriorityQueue_Swap(t *testing.T) { + t.Run("swap exchanges items and updates indices", func(t *testing.T) { + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq := PriorityQueue[string]{item1, item2} + + // Set initial indices + pq[0].index = 0 + pq[1].index = 1 + + // Swap items + pq.Swap(0, 1) + + // Check that items are swapped + assert.Equal(t, 
"item2", pq[0].message) + assert.Equal(t, "item1", pq[1].message) + + // Check that indices are updated + assert.Equal(t, 0, pq[0].index) + assert.Equal(t, 1, pq[1].index) + }) +} + +// TestPriorityQueue_Push tests the Push method +func TestPriorityQueue_Push(t *testing.T) { + t.Run("push adds item to queue", func(t *testing.T) { + pq := &PriorityQueue[string]{} + item := NewPriorityQueueItem("test", 5) + + pq.Push(item) + + assert.Equal(t, 1, pq.Len()) + assert.Equal(t, item, (*pq)[0]) + assert.Equal(t, 0, item.index) + }) + + t.Run("push sets correct index", func(t *testing.T) { + pq := &PriorityQueue[string]{} + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq.Push(item1) + pq.Push(item2) + + assert.Equal(t, 0, item1.index) + assert.Equal(t, 1, item2.index) + }) + + t.Run("push panics on non-PriorityQueueItem", func(t *testing.T) { + pq := &PriorityQueue[string]{} + initialLen := pq.Len() + + defer func() { + r := recover() + require.Equal(t, "unexpected type added to priority queue: string", r) + assert.Equal(t, initialLen, pq.Len()) + }() + + pq.Push("not an item") + }) +} + +// TestPriorityQueue_Pop tests the Pop method +func TestPriorityQueue_Pop(t *testing.T) { + t.Run("pop removes and returns last item", func(t *testing.T) { + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq := &PriorityQueue[string]{item1, item2} + initialLen := pq.Len() + + result := pq.Pop() + + assert.Equal(t, item2, result) + assert.Equal(t, initialLen-1, pq.Len()) + assert.Equal(t, -1, item2.index) + }) +} + +// TestPriorityQueue_HeapOperations tests the priority queue as a heap +func TestPriorityQueue_HeapOperations(t *testing.T) { + t.Run("heap operations maintain priority order", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + // Add items with different priorities + heap.Push(pq, NewPriorityQueueItem("low", 1)) + heap.Push(pq, NewPriorityQueueItem("high", 10)) + heap.Push(pq, NewPriorityQueueItem("medium", 5)) + + // Pop items and verify they come out in priority order + item1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "high", item1.message) + assert.Equal(t, uint64(10), item1.priority) + + item2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "medium", item2.message) + assert.Equal(t, uint64(5), item2.priority) + + item3 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "low", item3.message) + assert.Equal(t, uint64(1), item3.priority) + + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("heap operations with equal priorities use timestamp", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + // Add items with same priority but different timestamps + item1 := NewPriorityQueueItem("first", 5) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("second", 5) + time.Sleep(time.Millisecond) + item3 := NewPriorityQueueItem("third", 5) + + heap.Push(pq, item2) // Add in different order + heap.Push(pq, item3) + heap.Push(pq, item1) + + // Pop items and verify they come out in timestamp order (oldest first) + result1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "first", result1.message) + + result2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "second", result2.message) + + result3 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "third", result3.message) + }) + + t.Run("heap operations with mixed priorities and timestamps", func(t *testing.T) { + pq := &PriorityQueue[string]{} 
+ heap.Init(pq) + + // Add items with different priorities and timestamps + item1 := NewPriorityQueueItem("low1", 1) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("low2", 1) + time.Sleep(time.Millisecond) + item3 := NewPriorityQueueItem("high1", 10) + time.Sleep(time.Millisecond) + item4 := NewPriorityQueueItem("high2", 10) + + heap.Push(pq, item4) + heap.Push(pq, item1) + heap.Push(pq, item3) + heap.Push(pq, item2) + + // Pop items and verify order + results := make([]string, 4) + for i := 0; i < 4; i++ { + item := heap.Pop(pq).(*PriorityQueueItem[string]) + results[i] = item.message + } + + // High priority items should come first, then low priority items + // Within same priority, older timestamps should come first + expected := []string{"high1", "high2", "low1", "low2"} + assert.Equal(t, expected, results) + }) +} + +// TestPriorityQueue_EdgeCases tests edge cases and error conditions +func TestPriorityQueue_EdgeCases(t *testing.T) { + t.Run("empty queue operations", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + assert.Equal(t, 0, pq.Len()) + + // Pop on empty queue should panic (heap behavior) + assert.Panics(t, func() { + heap.Pop(pq) + }) + }) + + t.Run("single item queue", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item := NewPriorityQueueItem("single", 5) + heap.Push(pq, item) + + assert.Equal(t, 1, pq.Len()) + + result := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, item, result) + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("zero priority items", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item1 := NewPriorityQueueItem("zero1", 0) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("zero2", 0) + + heap.Push(pq, item2) + heap.Push(pq, item1) + + // Should come out in timestamp order + result1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "zero1", result1.message) + + result2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "zero2", result2.message) + }) + + t.Run("very high priority values", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item1 := NewPriorityQueueItem("normal", 1000) + item2 := NewPriorityQueueItem("very high", math.MaxUint64) + + heap.Push(pq, item1) + heap.Push(pq, item2) + + result := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "very high", result.message) + assert.Equal(t, uint64(math.MaxUint64), result.priority) + }) +} diff --git a/module/state_synchronization/indexer/collection_executed_metric.go b/module/state_synchronization/indexer/collection_executed_metric.go index bc1ee3fd341..844c72692c6 100644 --- a/module/state_synchronization/indexer/collection_executed_metric.go +++ b/module/state_synchronization/indexer/collection_executed_metric.go @@ -29,6 +29,8 @@ type CollectionExecutedMetricImpl struct { blockTransactions *stdmap.IdentifierMap // Map to track transactions for each block for sealed metrics } +var _ module.CollectionExecutedMetric = (*CollectionExecutedMetricImpl)(nil) + func NewCollectionExecutedMetricImpl( log zerolog.Logger, accessMetrics module.AccessMetrics, diff --git a/module/state_synchronization/indexer/in_memory_indexer.go b/module/state_synchronization/indexer/in_memory_indexer.go new file mode 100644 index 00000000000..7af0586cfdc --- /dev/null +++ b/module/state_synchronization/indexer/in_memory_indexer.go @@ -0,0 +1,190 @@ +package indexer + +import ( + "fmt" + "time" + + "github.com/jordanschalm/lockctx" + 
"github.com/rs/zerolog" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" + "github.com/onflow/flow-go/utils/logging" +) + +// InMemoryIndexer handles indexing of block execution data in memory. +// It stores data in unsynchronized in-memory caches that are designed +// to be populated once before being read. +type InMemoryIndexer struct { + log zerolog.Logger + registers *unsynchronized.Registers + events *unsynchronized.Events + collections *unsynchronized.Collections + results *unsynchronized.LightTransactionResults + txResultErrMsgs *unsynchronized.TransactionResultErrorMessages + executionResult *flow.ExecutionResult + header *flow.Header + lockManager storage.LockManager +} + +// NewInMemoryIndexer creates a new indexer that uses in-memory storage implementations. +// This is designed for processing unsealed blocks in the optimistic syncing pipeline. +// The caches are created externally and passed to the indexer, as they will also be used +// by the persister to save data permanently when a block is sealed. +func NewInMemoryIndexer( + log zerolog.Logger, + registers *unsynchronized.Registers, + events *unsynchronized.Events, + collections *unsynchronized.Collections, + results *unsynchronized.LightTransactionResults, + txResultErrMsgs *unsynchronized.TransactionResultErrorMessages, + executionResult *flow.ExecutionResult, + header *flow.Header, + lockManager storage.LockManager, +) *InMemoryIndexer { + indexer := &InMemoryIndexer{ + log: log.With().Str("component", "in_memory_indexer").Logger(), + registers: registers, + events: events, + collections: collections, + results: results, + txResultErrMsgs: txResultErrMsgs, + executionResult: executionResult, + header: header, + lockManager: lockManager, + } + + indexer.log.Info(). + Uint64("latest_height", header.Height). + Msg("indexer initialized") + + return indexer +} + +// IndexTxResultErrorMessagesData index transaction result error messages +// No errors are expected during normal operation. +func (i *InMemoryIndexer) IndexTxResultErrorMessagesData(txResultErrMsgs []flow.TransactionResultErrorMessage) error { + if err := i.txResultErrMsgs.Store(i.executionResult.BlockID, txResultErrMsgs); err != nil { + return fmt.Errorf("could not index transaction result error messages: %w", err) + } + return nil +} + +// IndexBlockData indexes all execution block data. +// No errors are expected during normal operation. +func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionDataEntity) error { + log := i.log.With().Hex("block_id", logging.ID(data.BlockID)).Logger() + log.Debug().Msg("indexing block data") + + if i.executionResult.BlockID != data.BlockID { + return fmt.Errorf("invalid block execution data. 
expected block_id=%s, actual block_id=%s", i.executionResult.BlockID, data.BlockID) + } + + start := time.Now() + + events := make([]flow.Event, 0) + results := make([]flow.LightTransactionResult, 0) + registers := make(map[ledger.Path]*ledger.Payload) + indexedCollections := 0 + + lctx := i.lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + return fmt.Errorf("could not acquire lock for collection insert: %w", err) + } + defer lctx.Release() + + // Process all chunk data in a single pass + for idx, chunk := range data.ChunkExecutionDatas { + // Collect events + events = append(events, chunk.Events...) + + // Collect transaction results + results = append(results, chunk.TransactionResults...) + + // Process collections (except system chunk) + if idx < len(data.ChunkExecutionDatas)-1 { + if err := i.indexCollection(lctx, chunk.Collection); err != nil { + return fmt.Errorf("could not handle collection: %w", err) + } + indexedCollections++ + } + + // Process register updates + if chunk.TrieUpdate != nil { + // Verify trie update integrity + if len(chunk.TrieUpdate.Paths) != len(chunk.TrieUpdate.Payloads) { + return fmt.Errorf("update paths length is %d and registers length is %d and they don't match", + len(chunk.TrieUpdate.Paths), len(chunk.TrieUpdate.Payloads)) + } + + // Collect registers (last one for a path wins) + for i, path := range chunk.TrieUpdate.Paths { + registers[path] = chunk.TrieUpdate.Payloads[i] + } + } + } + + if err := i.events.Store(data.BlockID, []flow.EventsList{events}); err != nil { + return fmt.Errorf("could not index events: %w", err) + } + + if err := i.results.Store(data.BlockID, results); err != nil { + return fmt.Errorf("could not index transaction results: %w", err) + } + + if err := i.indexRegisters(registers, i.header.Height); err != nil { + return fmt.Errorf("could not index registers: %w", err) + } + + log.Debug(). + Dur("duration_ms", time.Since(start)). + Int("event_count", len(events)). + Int("register_count", len(registers)). + Int("result_count", len(results)). + Int("collection_count", indexedCollections). + Msg("indexed block data") + + return nil +} + +// indexRegisters processes register payloads and stores them in the register database. +// No errors are expected during normal operation. +func (i *InMemoryIndexer) indexRegisters(registers map[ledger.Path]*ledger.Payload, height uint64) error { + regEntries := make(flow.RegisterEntries, 0, len(registers)) + + for _, register := range registers { + k, err := register.Key() + if err != nil { + return fmt.Errorf("failed to get ledger key: %w", err) + } + + id, err := convert.LedgerKeyToRegisterID(k) + if err != nil { + return fmt.Errorf("failed to convert ledger key to register id: %w", err) + } + + regEntries = append(regEntries, flow.RegisterEntry{ + Key: id, + Value: register.Value(), + }) + } + + return i.registers.Store(regEntries, height) +} + +// indexCollection processes a collection and its associated transactions. +// No errors are expected during normal operation. 
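+// The caller must already hold storage.LockInsertCollection and pass the corresponding
+// lock proof via lctx; IndexBlockData acquires this lock before iterating the chunks.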
+func (i *InMemoryIndexer) indexCollection(lctx lockctx.Proof, collection *flow.Collection) error { + // Store the light collection and index by transaction + _, err := i.collections.StoreAndIndexByTransaction(lctx, collection) + if err != nil { + return err + } + + return nil +} diff --git a/module/state_synchronization/indexer/in_memory_indexer_test.go b/module/state_synchronization/indexer/in_memory_indexer_test.go new file mode 100644 index 00000000000..b5b12c586bd --- /dev/null +++ b/module/state_synchronization/indexer/in_memory_indexer_test.go @@ -0,0 +1,361 @@ +package indexer + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store/inmemory/unsynchronized" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestNewInMemoryIndexer(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + assert.NotNil(t, indexer) + assert.Equal(t, header.Height, indexer.registers.LatestHeight()) +} + +func TestInMemoryIndexer_IndexBlockData(t *testing.T) { + t.Run("Index Single Chunk and Single Register", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + trie := TrieUpdateRandomLedgerPayloadsFixture(t) + require.NotEmpty(t, trie.Payloads) + collection := unittest.CollectionFixture(0) + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: &collection, + TrieUpdate: trie, + }, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err := indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify registers were indexed + for _, payload := range trie.Payloads { + k, err := payload.Key() + require.NoError(t, err) + + id, err := convert.LedgerKeyToRegisterID(k) + require.NoError(t, err) + + value, err := indexer.registers.Get(id, header.Height) + require.NoError(t, err) + + // Compare byte slices directly instead of comparing types + assert.ElementsMatch(t, []byte(payload.Value()), value, "Register values should match") + } + }) + + t.Run("Index Multiple Chunks and Merge Same Register Updates", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + tries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} + // Make sure we have two register updates that are updating the same value + tries[1].Paths[0] = tries[0].Paths[0] + testValue 
:= tries[1].Payloads[0] + key, err := testValue.Key() + require.NoError(t, err) + testRegisterID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + + collection := unittest.CollectionFixture(0) + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: &collection, + TrieUpdate: tries[0], + }, + { + Collection: &collection, + TrieUpdate: tries[1], + }, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err = indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify register was indexed + value, err := indexer.registers.Get(testRegisterID, header.Height) + require.NoError(t, err) + // Compare byte slices directly + assert.ElementsMatch(t, []byte(testValue.Value()), value, "Register values should match") + }) + + t.Run("Index Events", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + expectedEvents := unittest.EventsFixture(20) + collection := unittest.CollectionFixture(0) + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: &collection, + Events: expectedEvents[:10], + }, + { + Collection: &collection, + Events: expectedEvents[10:], + }, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err := indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify events were indexed correctly + events, err := indexer.events.ByBlockID(blockID) + require.NoError(t, err) + assert.ElementsMatch(t, expectedEvents, events) + }) + + t.Run("Index Tx Results", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + expectedResults := unittest.LightTransactionResultsFixture(20) + collection := unittest.CollectionFixture(0) + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + // Split results into 2 chunks + { + Collection: &collection, + TransactionResults: expectedResults[:10], + }, + { + Collection: &collection, + TransactionResults: expectedResults[10:], + }, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err := indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify results were indexed correctly + results, err := indexer.results.ByBlockID(blockID) + require.NoError(t, err) + assert.ElementsMatch(t, expectedResults, results) + }) + + t.Run("Index Collections", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, transactions := createInMemoryIndexer(lockManager, exeResult, header) + + // Create collections and store them directly first + expectedCollections := unittest.CollectionListFixture(2) + systemChunkCollection := 
unittest.CollectionFixture(1) + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + {Collection: expectedCollections[0]}, + {Collection: expectedCollections[1]}, + {Collection: &systemChunkCollection}, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err := indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify collections can be retrieved + for _, expectedCollection := range expectedCollections { + coll, err := indexer.collections.ByID(expectedCollection.ID()) + require.NoError(t, err) + assert.Equal(t, expectedCollection.Transactions, coll.Transactions) + + lightColl, err := indexer.collections.LightByID(expectedCollection.ID()) + require.NoError(t, err) + assert.Equal(t, expectedCollection.Light().Transactions, lightColl.Transactions) + + // Verify transactions were indexed + for _, tx := range expectedCollection.Transactions { + storedTx, err := transactions.ByID(tx.ID()) + require.NoError(t, err) + assert.Equal(t, tx.ID(), storedTx.ID()) + + storedLightTx, err := indexer.collections.LightByTransactionID(tx.ID()) + require.NoError(t, err) + assert.Equal(t, expectedCollection.Light().ID(), storedLightTx.ID()) + } + } + }) + + t.Run("Index AllTheThings", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + blockID := block.ID() + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + expectedEvents := unittest.EventsFixture(20) + expectedResults := unittest.LightTransactionResultsFixture(20) + expectedCollections := unittest.CollectionListFixture(2) + systemChunkCollection := unittest.CollectionFixture(1) + expectedTries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} + + ed := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: expectedCollections[0], + Events: expectedEvents[:10], + TransactionResults: expectedResults[:10], + TrieUpdate: expectedTries[0], + }, + { + Collection: expectedCollections[1], + TransactionResults: expectedResults[10:], + Events: expectedEvents[10:], + TrieUpdate: expectedTries[1], + }, + { + Collection: &systemChunkCollection, + }, + }, + } + + execData := execution_data.NewBlockExecutionDataEntity(blockID, ed) + + err := indexer.IndexBlockData(execData) + assert.NoError(t, err) + + // Verify all events were indexed + events, err := indexer.events.ByBlockID(blockID) + require.NoError(t, err) + assert.Len(t, events, len(expectedEvents)) + + // Verify all results were indexed + results, err := indexer.results.ByBlockID(blockID) + require.NoError(t, err) + assert.Len(t, results, len(expectedResults)) + + // Verify collections were indexed + for _, expectedCollection := range expectedCollections { + lightColl, err := indexer.collections.LightByID(expectedCollection.ID()) + require.NoError(t, err) + assert.Equal(t, expectedCollection.Light().Transactions, lightColl.Transactions) + } + + // Verify registers were indexed + // Collect all payloads across all tries + payloads := make(map[flow.RegisterID][]byte) + for _, trie := range expectedTries { + for _, payload := range trie.Payloads { + k, err := payload.Key() + require.NoError(t, err) + id, err := convert.LedgerKeyToRegisterID(k) + 
require.NoError(t, err) + payloads[id] = []byte(payload.Value()) + } + } + + // Check each register has the correct value + for id, expectedValue := range payloads { + value, err := indexer.registers.Get(id, header.Height) + require.NoError(t, err) + assert.ElementsMatch(t, expectedValue, value, "Register values should match") + } + }) + + t.Run("Index Transaction Error Messages", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + header := unittest.BlockHeaderFixture() + block := unittest.BlockWithParentFixture(header) + exeResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + indexer, _ := createInMemoryIndexer(lockManager, exeResult, header) + + txResultErrMsgsData := make([]flow.TransactionResultErrorMessage, 2) + for i := 0; i < 2; i++ { + txResultErrMsgsData[i] = flow.TransactionResultErrorMessage{ + TransactionID: unittest.IdentifierFixture(), + ErrorMessage: "expected test error", + Index: uint32(i), + ExecutorID: unittest.IdentifierFixture(), + } + } + + err := indexer.IndexTxResultErrorMessagesData(txResultErrMsgsData) + require.NoError(t, err) + + results, err := indexer.txResultErrMsgs.ByBlockID(block.ID()) + require.NoError(t, err) + assert.ElementsMatch(t, txResultErrMsgsData, results) + }) +} + +// Helper functions + +func createInMemoryIndexer(lockManager lockctx.Manager, executionResult *flow.ExecutionResult, header *flow.Header) (*InMemoryIndexer, *unsynchronized.Transactions) { + transactions := unsynchronized.NewTransactions() + return NewInMemoryIndexer(zerolog.Nop(), + unsynchronized.NewRegisters(header.Height), + unsynchronized.NewEvents(), + unsynchronized.NewCollections(transactions), + unsynchronized.NewLightTransactionResults(), + unsynchronized.NewTransactionResultErrorMessages(), + executionResult, + header, + lockManager, + ), transactions + +} diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index fc2f173ed60..ef768840a7a 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -35,6 +36,7 @@ type IndexerCore struct { derivedChainData *derived.DerivedChainData serviceAddress flow.Address + lockManager lockctx.Manager } // New execution state indexer used to ingest block execution data and index it by height. @@ -53,6 +55,7 @@ func New( chain flow.Chain, derivedChainData *derived.DerivedChainData, collectionExecutedMetric module.CollectionExecutedMetric, + lockManager lockctx.Manager, ) (*IndexerCore, error) { log = log.With().Str("component", "execution_indexer").Logger() metrics.InitializeLatestHeight(registers.LatestHeight()) @@ -76,6 +79,7 @@ func New( derivedChainData: derivedChainData, collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, }, nil } @@ -144,22 +148,21 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti results = append(results, chunk.TransactionResults...) 
} - batch := c.protocolDB.NewBatch() - defer batch.Close() - - err := c.events.BatchStore(data.BlockID, []flow.EventsList{events}, batch) - if err != nil { - return fmt.Errorf("could not index events at height %d: %w", header.Height, err) - } + err := c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := c.events.BatchStore(data.BlockID, []flow.EventsList{events}, rw) + if err != nil { + return fmt.Errorf("could not index events at height %d: %w", header.Height, err) + } - err = c.results.BatchStore(data.BlockID, results, batch) - if err != nil { - return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) - } + err = c.results.BatchStore(data.BlockID, results, rw) + if err != nil { + return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) + } + return nil + }) - err = batch.Commit() if err != nil { - return fmt.Errorf("batch flush error: %w", err) + return fmt.Errorf("could not commit block data: %w", err) } eventCount = len(events) @@ -186,9 +189,9 @@ func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEnti indexedCount := 0 if len(data.ChunkExecutionDatas) > 0 { for _, chunk := range data.ChunkExecutionDatas[0 : len(data.ChunkExecutionDatas)-1] { - err := HandleCollection(chunk.Collection, c.collections, c.transactions, c.log, c.collectionExecutedMetric) + err := c.indexCollection(chunk.Collection) if err != nil { - return fmt.Errorf("could not handle collection") + return err } indexedCount++ } @@ -326,45 +329,41 @@ func (c *IndexerCore) indexRegisters(registers map[ledger.Path]*ledger.Payload, return c.registers.Store(regEntries, height) } -// HandleCollection handles the response of the collection request made earlier when a block was received. +func (c *IndexerCore) indexCollection(collection *flow.Collection) error { + lctx := c.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + return fmt.Errorf("could not acquire lock for indexing collections: %w", err) + } + + err = IndexCollection(lctx, collection, c.collections, c.log, c.collectionExecutedMetric) + if err != nil { + return fmt.Errorf("could not handle collection") + } + return nil +} + +// IndexCollection handles the response of the collection request made earlier when a block was received. // No errors expected during normal operations. -func HandleCollection( +func IndexCollection( + lctx lockctx.Proof, collection *flow.Collection, collections storage.Collections, - transactions storage.Transactions, logger zerolog.Logger, collectionExecutedMetric module.CollectionExecutedMetric, ) error { - light := collection.Light() - - collectionExecutedMetric.CollectionFinalized(light) - collectionExecutedMetric.CollectionExecuted(light) - // FIX: we can't index guarantees here, as we might have more than one block // with the same collection as long as it is not finalized - // store the light collection (collection minus the transaction body - those are stored separately) - // and add transaction ids as index - err := collections.StoreLightAndIndexByTransaction(&light) + // store the collection, including constituent transactions, and index transactionID -> collectionID + light, err := collections.StoreAndIndexByTransaction(lctx, collection) if err != nil { - // ignore collection if already seen - if errors.Is(err, storage.ErrAlreadyExists) { - logger.Debug(). - Hex("collection_id", logging.Entity(light)). 
- Msg("collection is already seen") - return nil - } return err } - // now store each of the transaction body - for _, tx := range collection.Transactions { - err := transactions.Store(tx) - if err != nil { - return fmt.Errorf("could not store transaction (%x): %w", tx.ID(), err) - } - } - + collectionExecutedMetric.CollectionFinalized(light) + collectionExecutedMetric.CollectionExecuted(light) return nil } diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go index aa80704151b..e6cd4d2783c 100644 --- a/module/state_synchronization/indexer/indexer_core_test.go +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -2,7 +2,6 @@ package indexer import ( "context" - "crypto/rand" "fmt" "os" "testing" @@ -16,9 +15,8 @@ import ( "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/convert" - "github.com/onflow/flow-go/ledger/common/pathfinder" - "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" @@ -162,7 +160,7 @@ func (i *indexCoreTest) setGetRegisters(f func(t *testing.T, ID flow.RegisterID, func (i *indexCoreTest) useDefaultStorageMocks() *indexCoreTest { - i.collections.On("StoreLightAndIndexByTransaction", mock.AnythingOfType("*flow.LightCollection")).Return(nil).Maybe() + i.collections.On("StoreAndIndexByTransaction", mock.Anything, mock.AnythingOfType("*flow.Collection")).Return(flow.LightCollection{}, nil).Maybe() i.transactions.On("Store", mock.AnythingOfType("*flow.TransactionBody")).Return(nil).Maybe() return i @@ -183,6 +181,7 @@ func (i *indexCoreTest) useDefaultTransactionResults() *indexCoreTest { } func (i *indexCoreTest) initIndexer() *indexCoreTest { + lockManager := storage.NewTestingLockManager() db, dbDir := unittest.TempBadgerDB(i.t) i.t.Cleanup(func() { require.NoError(i.t, db.Close()) @@ -231,6 +230,7 @@ func (i *indexCoreTest) initIndexer() *indexCoreTest { flow.Testnet.Chain(), derivedChainData, collectionExecutedMetric, + lockManager, ) require.NoError(i.t, err) i.indexer = indexer @@ -255,7 +255,7 @@ func TestExecutionState_IndexBlockData(t *testing.T) { // this test makes sure the index block data is correctly calling store register with the // same entries we create as a block execution data test, and correctly converts the registers t.Run("Index Single Chunk and Single Register", func(t *testing.T) { - trie := trieUpdateFixture(t) + trie := TrieUpdateRandomLedgerPayloadsFixture(t) ed := &execution_data.BlockExecutionData{ BlockID: block.ID(), ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ @@ -290,7 +290,7 @@ func TestExecutionState_IndexBlockData(t *testing.T) { // we only update that register once with the latest value, so this makes sure merging of // registers is done correctly. 
t.Run("Index Multiple Chunks and Merge Same Register Updates", func(t *testing.T) { - tries := []*ledger.TrieUpdate{trieUpdateFixture(t), trieUpdateFixture(t)} + tries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} // make sure we have two register updates that are updating the same value, so we can check // if the value from the second update is being persisted instead of first tries[1].Paths[0] = tries[0].Paths[0] @@ -477,7 +477,7 @@ func TestExecutionState_IndexBlockData(t *testing.T) { expectedEvents := unittest.EventsFixture(20) expectedResults := unittest.LightTransactionResultsFixture(20) expectedCollections := unittest.CollectionListFixture(2) - expectedTries := []*ledger.TrieUpdate{trieUpdateFixture(t), trieUpdateFixture(t)} + expectedTries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} expectedPayloads := make([]*ledger.Payload, 0) for _, trie := range expectedTries { expectedPayloads = append(expectedPayloads, trie.Payloads...) @@ -604,58 +604,6 @@ func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { return synctest.MockBlockHeaderStorage(synctest.WithByID(blocksByID)) } -func trieUpdateWithPayloadsFixture(payloads []*ledger.Payload) *ledger.TrieUpdate { - keys := make([]ledger.Key, 0) - values := make([]ledger.Value, 0) - for _, payload := range payloads { - key, _ := payload.Key() - keys = append(keys, key) - values = append(values, payload.Value()) - } - - update, _ := ledger.NewUpdate(ledger.DummyState, keys, values) - trie, _ := pathfinder.UpdateToTrieUpdate(update, complete.DefaultPathFinderVersion) - return trie -} - -func trieUpdateFixture(t *testing.T) *ledger.TrieUpdate { - return trieUpdateWithPayloadsFixture( - []*ledger.Payload{ - ledgerPayloadFixture(t), - ledgerPayloadFixture(t), - ledgerPayloadFixture(t), - ledgerPayloadFixture(t), - }) -} - -func ledgerPayloadFixture(t *testing.T) *ledger.Payload { - owner := unittest.RandomAddressFixture() - key := make([]byte, 8) - _, err := rand.Read(key) - require.NoError(t, err) - val := make([]byte, 8) - _, err = rand.Read(key) - require.NoError(t, err) - return ledgerPayloadWithValuesFixture(owner.String(), fmt.Sprintf("%x", key), val) -} - -func ledgerPayloadWithValuesFixture(owner string, key string, value []byte) *ledger.Payload { - k := ledger.Key{ - KeyParts: []ledger.KeyPart{ - { - Type: ledger.KeyPartOwner, - Value: []byte(owner), - }, - { - Type: ledger.KeyPartKey, - Value: []byte(key), - }, - }, - } - - return ledger.NewPayload(k, value) -} - // trieRegistersPayloadComparer checks that trie payloads and register payloads are same, used for testing. 
func trieRegistersPayloadComparer(t *testing.T, triePayloads []*ledger.Payload, registerPayloads flow.RegisterEntries) { assert.Equal(t, len(triePayloads), len(registerPayloads.Values()), "registers length should equal") @@ -677,6 +625,7 @@ func trieRegistersPayloadComparer(t *testing.T, triePayloads []*ledger.Payload, } func TestIndexerIntegration_StoreAndGet(t *testing.T) { + lockManager := storage.NewTestingLockManager() regOwnerAddress := unittest.RandomAddressFixture() regOwner := string(regOwnerAddress.Bytes()) regKey := "code" @@ -698,7 +647,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { index, err := New( logger, - metrics, + module.ExecutionStateIndexerMetrics(metrics), badgerimpl.ToDB(db), registers, nil, @@ -709,6 +658,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { flow.Testnet.Chain(), derivedChainData, nil, + lockManager, ) require.NoError(t, err) @@ -732,7 +682,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { index, err := New( logger, - metrics, + module.ExecutionStateIndexerMetrics(metrics), badgerimpl.ToDB(db), registers, nil, @@ -743,6 +693,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { flow.Testnet.Chain(), derivedChainData, nil, + lockManager, ) require.NoError(t, err) @@ -759,7 +710,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { index, err := New( logger, - metrics, + module.ExecutionStateIndexerMetrics(metrics), badgerimpl.ToDB(db), registers, nil, @@ -770,6 +721,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { flow.Testnet.Chain(), derivedChainData, nil, + lockManager, ) require.NoError(t, err) @@ -803,7 +755,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { index, err := New( logger, - metrics, + module.ExecutionStateIndexerMetrics(metrics), badgerimpl.ToDB(db), registers, nil, @@ -814,6 +766,7 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { flow.Testnet.Chain(), derivedChainData, nil, + lockManager, ) require.NoError(t, err) @@ -826,6 +779,6 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { // helper to store register at height and increment index range func storeRegisterWithValue(indexer *IndexerCore, height uint64, owner string, key string, value []byte) error { - payload := ledgerPayloadWithValuesFixture(owner, key, value) + payload := LedgerPayloadFixture(owner, key, value) return indexer.indexRegisters(map[ledger.Path]*ledger.Payload{ledger.DummyPath: payload}, height) } diff --git a/module/state_synchronization/indexer/indexer_test.go b/module/state_synchronization/indexer/indexer_test.go index bcf7e42feb6..f720ca80c8c 100644 --- a/module/state_synchronization/indexer/indexer_test.go +++ b/module/state_synchronization/indexer/indexer_test.go @@ -130,6 +130,8 @@ func (m *mockProgressInitializer) Initialize(defaultIndex uint64) (storage.Consu return m.progress, nil } +var _ storage.ConsumerProgress = (*mockProgress)(nil) + type mockProgress struct { index *atomic.Uint64 doneIndex *atomic.Uint64 @@ -159,6 +161,10 @@ func (w *mockProgress) SetProcessedIndex(index uint64) error { return nil } +func (w *mockProgress) 
BatchSetProcessedIndex(_ uint64, _ storage.ReaderBatchWriter) error { + return fmt.Errorf("batch not supported") +} + func (w *mockProgress) InitProcessedIndex(index uint64) error { w.index.Store(index) return nil @@ -177,7 +183,7 @@ func TestIndexer_Success(t *testing.T) { test := newIndexerTest(t, blocks, lastIndexedIndex) test.setBlockDataByID(func(ID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) { - trie := trieUpdateFixture(t) + trie := TrieUpdateRandomLedgerPayloadsFixture(t) collection := unittest.CollectionFixture(0) ed := &execution_data.BlockExecutionData{ BlockID: ID, @@ -221,7 +227,7 @@ func TestIndexer_Failure(t *testing.T) { test := newIndexerTest(t, blocks, lastIndexedIndex) test.setBlockDataByID(func(ID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) { - trie := trieUpdateFixture(t) + trie := TrieUpdateRandomLedgerPayloadsFixture(t) collection := unittest.CollectionFixture(0) ed := &execution_data.BlockExecutionData{ BlockID: ID, diff --git a/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go b/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go new file mode 100644 index 00000000000..8547c43645b --- /dev/null +++ b/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go @@ -0,0 +1,80 @@ +package indexer + +import ( + "crypto/rand" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/pathfinder" + "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/utils/unittest" +) + +// TrieUpdateRandomLedgerPayloadsFixture creates a test trie update with multiple test payloads +// for use in testing register persistence functionality. +func TrieUpdateRandomLedgerPayloadsFixture(t *testing.T) *ledger.TrieUpdate { + return TrieUpdateWithPayloadsFixture( + t, + []*ledger.Payload{ + LedgerRandomPayloadFixture(t), + LedgerRandomPayloadFixture(t), + LedgerRandomPayloadFixture(t), + LedgerRandomPayloadFixture(t), + }) +} + +// TrieUpdateWithPayloadsFixture creates a trie update from the provided payloads. +// It extracts keys and values from payloads and constructs a proper ledger update +// and trie update structure for testing purposes. +func TrieUpdateWithPayloadsFixture(t *testing.T, payloads []*ledger.Payload) *ledger.TrieUpdate { + keys := make([]ledger.Key, 0) + values := make([]ledger.Value, 0) + for _, payload := range payloads { + key, err := payload.Key() + require.NoError(t, err) + keys = append(keys, key) + values = append(values, payload.Value()) + } + + update, err := ledger.NewUpdate(ledger.DummyState, keys, values) + require.NoError(t, err) + trie, err := pathfinder.UpdateToTrieUpdate(update, complete.DefaultPathFinderVersion) + require.NoError(t, err) + return trie +} + +// LedgerRandomPayloadFixture creates a single test payload with a random owner, key, and value +// for use in ledger and register testing scenarios. +func LedgerRandomPayloadFixture(t *testing.T) *ledger.Payload { + owner := unittest.RandomAddressFixture() + key := make([]byte, 8) + _, err := rand.Read(key) + require.NoError(t, err) + val := make([]byte, 8) + _, err = rand.Read(val) + require.NoError(t, err) + return LedgerPayloadFixture(owner.String(), fmt.Sprintf("%x", key), val) +} + +// LedgerPayloadFixture creates a ledger payload with the specified owner, key, and value. +// It constructs a proper ledger key with owner and key parts and returns a payload +// suitable for testing ledger operations. 
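+// As an illustration, LedgerRandomPayloadFixture above invokes it as
+// LedgerPayloadFixture(owner.String(), fmt.Sprintf("%x", key), val): the owner is an
+// address string and the key is hex-encoded before being wrapped into the key parts.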
+func LedgerPayloadFixture(owner string, key string, value []byte) *ledger.Payload { + k := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(owner), + }, + { + Type: ledger.KeyPartKey, + Value: []byte(key), + }, + }, + } + + return ledger.NewPayload(k, value) +} diff --git a/module/state_synchronization/requester/mock/execution_data_requester.go b/module/state_synchronization/requester/mock/execution_data_requester.go new file mode 100644 index 00000000000..657afe6b8b0 --- /dev/null +++ b/module/state_synchronization/requester/mock/execution_data_requester.go @@ -0,0 +1,59 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + context "context" + + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + mock "github.com/stretchr/testify/mock" +) + +// ExecutionDataRequester is an autogenerated mock type for the ExecutionDataRequester type +type ExecutionDataRequester struct { + mock.Mock +} + +// RequestExecutionData provides a mock function with given fields: ctx +func (_m *ExecutionDataRequester) RequestExecutionData(ctx context.Context) (*execution_data.BlockExecutionData, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RequestExecutionData") + } + + var r0 *execution_data.BlockExecutionData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*execution_data.BlockExecutionData, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *execution_data.BlockExecutionData); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution_data.BlockExecutionData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutionDataRequester creates a new instance of ExecutionDataRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionDataRequester(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionDataRequester { + mock := &ExecutionDataRequester{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/state_synchronization/requester/oneshot_execution_data_requester.go b/module/state_synchronization/requester/oneshot_execution_data_requester.go index 6d6979cb229..b7059860099 100644 --- a/module/state_synchronization/requester/oneshot_execution_data_requester.go +++ b/module/state_synchronization/requester/oneshot_execution_data_requester.go @@ -14,6 +14,16 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" ) +// ExecutionDataRequester defines the interface for requesting execution data for a block. +type ExecutionDataRequester interface { + // RequestExecutionData requests execution data for a given block. + // + // Expected errors: + // - context.Canceled: if the provided context was canceled before completion + // All other errors are unexpected exceptions and may indicate invalid execution data was received. + RequestExecutionData(ctx context.Context) (*execution_data.BlockExecutionData, error) +} + // OneshotExecutionDataConfig is a config for the oneshot execution data requester. // It contains the retry settings for the execution data fetch. 
type OneshotExecutionDataConfig struct { @@ -29,6 +39,8 @@ type OneshotExecutionDataConfig struct { MaxRetryDelay time.Duration } +var _ ExecutionDataRequester = (*OneshotExecutionDataRequester)(nil) + // OneshotExecutionDataRequester is a component that requests execution data for a block. // It uses a retry mechanism to retry the download execution data if they are not found. type OneshotExecutionDataRequester struct { diff --git a/module/util/folder.go b/module/util/folder.go new file mode 100644 index 00000000000..c168a58ce35 --- /dev/null +++ b/module/util/folder.go @@ -0,0 +1,32 @@ +package util + +import ( + "fmt" + "os" +) + +// IsEmptyOrNotExists returns true if the directory does not exist or is empty. +// It returns an error if there's an issue accessing the directory. +func IsEmptyOrNotExists(path string) (bool, error) { + // Check if the path exists + info, err := os.Stat(path) + if os.IsNotExist(err) { + // Directory does not exist + return true, nil + } + if err != nil { + return false, fmt.Errorf("error stating path %s: %w", path, err) + } + if !info.IsDir() { + return false, fmt.Errorf("path %s exists but is not a directory", path) + } + + // Read directory contents + files, err := os.ReadDir(path) + if err != nil { + return false, fmt.Errorf("error reading directory %s: %w", path, err) + } + + // If the directory has no entries, it's empty + return len(files) == 0, nil +} diff --git a/module/validation/seal_validator.go b/module/validation/seal_validator.go index f396268ba62..cbe4d8dc38c 100644 --- a/module/validation/seal_validator.go +++ b/module/validation/seal_validator.go @@ -55,11 +55,15 @@ func (s *sealValidator) verifySealSignature(aggregatedSignatures *flow.Aggregate chunk *flow.Chunk, executionResultID flow.Identifier) error { // TODO: replace implementation once proper aggregation is used for Verifiers' attestation signatures. 
- atst := flow.Attestation{ + atst, err := flow.NewAttestation(flow.UntrustedAttestation{ BlockID: chunk.BlockID, ExecutionResultID: executionResultID, ChunkIndex: chunk.Index, + }) + if err != nil { + return fmt.Errorf("could not build attestation: %w", err) } + atstID := atst.ID() for i, signature := range aggregatedSignatures.VerifierSignatures { @@ -169,7 +173,14 @@ func (s *sealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { if err != nil { return fmt.Errorf("internal error fetching result %v incorporated in block %v: %w", resultID, blockID, err) } - incorporatedResults[resultID] = flow.NewIncorporatedResult(blockID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result: %w", err) + } + incorporatedResults[resultID] = incorporatedResult } return nil } diff --git a/network/codec/codes.go b/network/codec/codes.go index 8e6eef7b7fa..3fa38649e57 100644 --- a/network/codec/codes.go +++ b/network/codec/codes.go @@ -192,7 +192,8 @@ func InterfaceFromMessageCode(code MessageCode) (interface{}, string, error) { case CodeExecutionReceipt: return &flow.ExecutionReceipt{}, what(&flow.ExecutionReceipt{}), nil case CodeResultApproval: - return &flow.ResultApproval{}, what(&flow.ResultApproval{}), nil + var approval flow.ResultApproval + return &approval, what(&approval), nil // data exchange for execution of blocks case CodeChunkDataRequest: diff --git a/network/message/authorization.go b/network/message/authorization.go index 9b16a90141f..d752e9d8f78 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -123,7 +123,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]ChannelAuthConfig{ channels.SyncCommittee: { - AuthorizedRoles: flow.RoleList{flow.RoleConsensus}, + AuthorizedRoles: flow.RoleList{flow.RoleConsensus, flow.RoleExecution}, AllowedProtocols: Protocols{ProtocolTypeUnicast}, }, channels.SyncClusterPrefix: { @@ -171,7 +171,7 @@ func initializeMessageAuthConfigsMap() { }, Config: map[channels.Channel]ChannelAuthConfig{ channels.SyncCommittee: { - AuthorizedRoles: flow.RoleList{flow.RoleConsensus}, + AuthorizedRoles: flow.RoleList{flow.RoleConsensus, flow.RoleExecution}, AllowedProtocols: Protocols{ProtocolTypeUnicast}, }, }, diff --git a/network/p2p/logging/internal/peerIdCache.go b/network/p2p/logging/internal/peerIdCache.go index 5474826489d..e5bf8c538cc 100644 --- a/network/p2p/logging/internal/peerIdCache.go +++ b/network/p2p/logging/internal/peerIdCache.go @@ -3,7 +3,7 @@ package internal import ( "fmt" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/libp2p/go-libp2p/core/peer" ) @@ -13,11 +13,11 @@ type PeerIdCache struct { // to using it here. // This PeerIdCache is used extensively across the codebase, so any minor import cycle will cause // a lot of trouble. 
- peerCache *lru.Cache + peerCache *lru.Cache[peer.ID, string] } func NewPeerIdCache(size int) (*PeerIdCache, error) { - c, err := lru.New(size) + c, err := lru.New[peer.ID, string](size) if err != nil { return nil, fmt.Errorf("failed to create peer id cache: %w", err) } @@ -32,7 +32,7 @@ func NewPeerIdCache(size int) (*PeerIdCache, error) { func (p *PeerIdCache) PeerIdString(pid peer.ID) string { pidStr, ok := p.peerCache.Get(pid) if ok { - return pidStr.(string) + return pidStr } pidStr0 := pid.String() @@ -50,7 +50,7 @@ func (p *PeerIdCache) Size() int { func (p *PeerIdCache) ByPeerId(pid peer.ID) (string, bool) { pidStr, ok := p.peerCache.Get(pid) if ok { - return pidStr.(string), true + return pidStr, true } return "", false } diff --git a/state/cluster/badger/mutator.go b/state/cluster/badger/mutator.go index a4d867f4a8a..877eb1e6aa7 100644 --- a/state/cluster/badger/mutator.go +++ b/state/cluster/badger/mutator.go @@ -6,7 +6,7 @@ import ( "fmt" "math" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" @@ -17,25 +17,27 @@ import ( clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) type MutableState struct { *State - tracer module.Tracer - headers storage.Headers - payloads storage.ClusterPayloads + lockManager lockctx.Manager + tracer module.Tracer + headers storage.Headers + payloads storage.ClusterPayloads } var _ clusterstate.MutableState = (*MutableState)(nil) -func NewMutableState(state *State, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) { +func NewMutableState(state *State, lockManager lockctx.Manager, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) { mutableState := &MutableState{ - State: state, - tracer: tracer, - headers: headers, - payloads: payloads, + State: state, + lockManager: lockManager, + tracer: tracer, + headers: headers, + payloads: payloads, } return mutableState, nil } @@ -57,36 +59,32 @@ func (m *MutableState) getExtendCtx(candidate *cluster.Block) (extendContext, er var ctx extendContext ctx.candidate = candidate - err := m.State.db.View(func(tx *badger.Txn) error { - // get the latest finalized cluster block and latest finalized consensus height - ctx.finalizedClusterBlock = new(flow.Header) - err := procedure.RetrieveLatestFinalizedClusterHeader(candidate.Header.ChainID, ctx.finalizedClusterBlock)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized cluster head: %w", err) - } - err = operation.RetrieveFinalizedHeight(&ctx.finalizedConsensusHeight)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized height on consensus chain: %w", err) - } + r := m.State.db.Reader() + // get the latest finalized cluster block and latest finalized consensus height + ctx.finalizedClusterBlock = new(flow.Header) + err := procedure.RetrieveLatestFinalizedClusterHeader(r, candidate.Header.ChainID, ctx.finalizedClusterBlock) + if err != nil { + return extendContext{}, fmt.Errorf("could not retrieve finalized cluster head: %w", err) + } + err = operation.RetrieveFinalizedHeight(r, &ctx.finalizedConsensusHeight) + if err != nil { + return 
extendContext{}, fmt.Errorf("could not retrieve finalized height on consensus chain: %w", err) + } - err = operation.RetrieveEpochFirstHeight(m.State.epoch, &ctx.epochFirstHeight)(tx) - if err != nil { - return fmt.Errorf("could not get operating epoch first height: %w", err) - } - err = operation.RetrieveEpochLastHeight(m.State.epoch, &ctx.epochLastHeight)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - ctx.epochHasEnded = false - return nil - } - return fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) - } - ctx.epochHasEnded = true - return nil - }) + err = operation.RetrieveEpochFirstHeight(r, m.State.epoch, &ctx.epochFirstHeight) if err != nil { - return extendContext{}, fmt.Errorf("could not read required state information for Extend checks: %w", err) + return extendContext{}, fmt.Errorf("could not get operating epoch first height: %w", err) } + err = operation.RetrieveEpochLastHeight(r, m.State.epoch, &ctx.epochLastHeight) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + ctx.epochHasEnded = false + return ctx, nil + } + return extendContext{}, fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) + } + ctx.epochHasEnded = true + return ctx, nil } @@ -130,19 +128,29 @@ func (m *MutableState) Extend(candidate *cluster.Block) error { return fmt.Errorf("error checking reference block: %w", err) } + lctx := m.lockManager.NewContext() + err = lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + if err != nil { + return fmt.Errorf("could not acquire lock for inserting cluster block: %w", err) + } + defer lctx.Release() + span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckTransactionsValid) - err = m.checkPayloadTransactions(extendCtx) + err = m.checkPayloadTransactions(lctx, extendCtx) span.End() if err != nil { return fmt.Errorf("error checking payload transactions: %w", err) } span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendDBInsert) - err = operation.RetryOnConflict(m.State.db.Update, procedure.InsertClusterBlock(candidate)) + err = m.State.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, candidate) + }) span.End() if err != nil { return fmt.Errorf("could not insert cluster block: %w", err) } + return nil } @@ -266,7 +274,7 @@ func (m *MutableState) checkPayloadReferenceBlock(ctx extendContext) error { // Expected error returns: // - state.InvalidExtensionError if the reference block is invalid for use. // - state.UnverifiableExtensionError if the reference block is unknown. -func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { +func (m *MutableState) checkPayloadTransactions(lctx lockctx.Proof, ctx extendContext) error { block := ctx.candidate payload := block.Payload @@ -334,7 +342,11 @@ func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { } // second, check for duplicate transactions in the finalized ancestry - duplicateTxIDs, err = m.checkDupeTransactionsInFinalizedAncestry(txLookup, minRefHeight, maxRefHeight) + // CAUTION: Finalization might progress while we are running this logic. However, finalization is not guaranteed to + // follow the same fork as the one we are extending here. Hence, we might apply the transaction de-duplication logic + // against blocks that do not belong to our fork. 
If we erroneously find a duplicated transaction, based on a block + // that is not part of our fork, we would be raising an invalid slashing challenge, which would get this node slashed. + duplicateTxIDs, err = m.checkDupeTransactionsInFinalizedAncestry(lctx, txLookup, minRefHeight, maxRefHeight) if err != nil { return fmt.Errorf("could not check for duplicate txs in finalized ancestry: %w", err) } @@ -348,7 +360,6 @@ func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { // checkDupeTransactionsInUnfinalizedAncestry checks for duplicate transactions in the un-finalized // ancestry of the given block, and returns a list of all duplicates if there are any. func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster.Block, includedTransactions map[flow.Identifier]struct{}, finalHeight uint64) ([]flow.Identifier, error) { - var duplicateTxIDs []flow.Identifier err := fork.TraverseBackward(m.headers, block.Header.ParentID, func(ancestor *flow.Header) error { payload, err := m.payloads.ByBlockID(ancestor.ID()) @@ -371,7 +382,7 @@ func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster // checkDupeTransactionsInFinalizedAncestry checks for duplicate transactions in the finalized // ancestry, and returns a list of all duplicates if there are any. -func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransactions map[flow.Identifier]struct{}, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) { +func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(lctx lockctx.Proof, includedTransactions map[flow.Identifier]struct{}, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) { var duplicatedTxIDs []flow.Identifier // Let E be the global transaction expiry constant, measured in blocks. For each @@ -385,8 +396,8 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransact // Boundary conditions: // 1. C's reference block height is equal to the lowest reference block height of // all its constituent transactions. Hence, for collection C to potentially contain T, it must satisfy c <= t. - // 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed - // to be expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E. + // 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed to be + // expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E. // // Therefore, for collection C to potentially contain transaction T, it must satisfy t - E < c <= t. // In other words, we only need to inspect collections with reference block height c ∈ (t-E, t]. 
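The interval reasoning above, c ∈ (t-E, t], reduces to a clamped inclusive height range; the next hunk performs that lookup against storage. As a standalone illustration of just the arithmetic (the helper name and the parameterized expiry are assumptions, not part of the patch):

```go
// collectionRefHeightRange returns the inclusive height range [start, end] of collection
// reference heights worth inspecting when de-duplicating transactions whose reference
// heights span [minRefHeight, maxRefHeight], given expiry window E (passed as `expiry`).
// Per the interval c ∈ (t-E, t] derived above, the lowest candidate height is
// minRefHeight-E+1, clamped to 0 on underflow.
func collectionRefHeightRange(minRefHeight, maxRefHeight, expiry uint64) (uint64, uint64) {
	var start uint64
	if minRefHeight+1 > expiry {
		start = minRefHeight + 1 - expiry
	} else {
		start = 0 // underflow guard, analogous to the overflow check in the hunk below
	}
	return start, maxRefHeight
}
```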
@@ -400,7 +411,7 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransact start = 0 // overflow check } end := maxRefHeight - err := m.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs)) + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, m.db.Reader(), start, end, &clusterBlockIDs) if err != nil { return nil, fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err) } diff --git a/state/cluster/badger/mutator_test.go b/state/cluster/badger/mutator_test.go index de2a2d33950..688622b857f 100644 --- a/state/cluster/badger/mutator_test.go +++ b/state/cluster/badger/mutator_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,16 +28,20 @@ import ( "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" protocolutil "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) type MutatorSuite struct { suite.Suite - db *badger.DB - dbdir string + db storage.DB + badgerdb *badger.DB + dbdir string + lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID @@ -58,13 +63,15 @@ func (suite *MutatorSuite) SetupTest() { suite.chainID = suite.genesis.Header.ChainID suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.badgerdb = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.db = badgerimpl.ToDB(suite.badgerdb) + suite.lockManager = storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := storage.InitAll(metrics, suite.db) - colPayloads := storage.NewClusterPayloads(metrics, suite.db) + all := store.InitAll(metrics, suite.db) + colPayloads := store.NewClusterPayloads(metrics, suite.db) // just bootstrap with a genesis block, we'll use this as reference genesis, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) @@ -76,13 +83,16 @@ func (suite *MutatorSuite) SetupTest() { qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(genesis.ID())) safetyParams, err := protocol.DefaultEpochSafetyParams(genesis.Header.ChainID) require.NoError(suite.T(), err) + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents( + result.ServiceEvents[0].Event.(*flow.EpochSetup), + result.ServiceEvents[1].Event.(*flow.EpochCommit), + ) + require.NoError(suite.T(), err) rootProtocolState, err := kvstore.NewDefaultKVStore( safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, - inmem.EpochProtocolStateFromServiceEvents( - result.ServiceEvents[0].Event.(*flow.EpochSetup), - result.ServiceEvents[1].Event.(*flow.EpochCommit), - ).ID()) + minEpochStateEntry.ID(), + ) require.NoError(suite.T(), err) genesis.Payload.ProtocolStateID = rootProtocolState.ID() rootSnapshot, err := 
inmem.SnapshotFromBootstrapState(genesis, result, seal, qc) @@ -93,12 +103,13 @@ func (suite *MutatorSuite) SetupTest() { state, err := pbadger.Bootstrap( metrics, suite.db, + suite.lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, all.EpochProtocolStateEntries, all.ProtocolKVStore, @@ -106,7 +117,9 @@ func (suite *MutatorSuite) SetupTest() { rootSnapshot, ) require.NoError(suite.T(), err) - suite.protoState, err = pbadger.NewFollowerState(log, tracer, events.NewNoop(), state, all.Index, all.Payloads, protocolutil.MockBlockTimer()) + suite.protoState, err = pbadger.NewFollowerState( + log, tracer, events.NewNoop(), state, all.Index, all.Payloads, protocolutil.MockBlockTimer(), + ) require.NoError(suite.T(), err) suite.mutableProtocolState = protocol_state.NewMutableProtocolState( @@ -116,21 +129,21 @@ func (suite *MutatorSuite) SetupTest() { state.Params(), all.Headers, all.Results, - all.Setups, + all.EpochSetups, all.EpochCommits, ) clusterStateRoot, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.NoError(err) - clusterState, err := Bootstrap(suite.db, clusterStateRoot) + clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Assert().Nil(err) - suite.state, err = NewMutableState(clusterState, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) suite.Assert().Nil(err) } // runs after each test finishes func (suite *MutatorSuite) TearDownTest() { - err := suite.db.Close() + err := suite.badgerdb.Close() suite.Assert().Nil(err) err = os.RemoveAll(suite.dbdir) suite.Assert().Nil(err) @@ -171,18 +184,19 @@ func (suite *MutatorSuite) Block() model.Block { } func (suite *MutatorSuite) FinalizeBlock(block model.Block) { - err := suite.db.Update(func(tx *badger.Txn) error { - var refBlock flow.Header - err := operation.RetrieveHeader(block.Payload.ReferenceBlockID, &refBlock)(tx) - if err != nil { - return err - } - err = procedure.FinalizeClusterBlock(block.ID())(tx) + var refBlock flow.Header + err := operation.RetrieveHeader(suite.db.Reader(), block.Payload.ReferenceBlockID, &refBlock) + suite.Require().Nil(err) + + lctx := suite.lockManager.NewContext() + defer lctx.Release() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err = procedure.FinalizeClusterBlock(lctx, rw, block.ID()) if err != nil { return err } - err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, block.ID())(tx) - return err + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, block.ID()) }) suite.Assert().NoError(err) } @@ -230,40 +244,40 @@ func (suite *MutatorSuite) TestBootstrap_InvalidPayload() { } func (suite *MutatorSuite) TestBootstrap_Successful() { - err := suite.db.View(func(tx *badger.Txn) error { + err := (func(r storage.Reader) error { // should insert collection var collection flow.LightCollection - err := operation.RetrieveCollection(suite.genesis.Payload.Collection.ID(), &collection)(tx) + err := operation.RetrieveCollection(r, suite.genesis.Payload.Collection.ID(), &collection) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Payload.Collection.Light(), collection) // should index collection collection = flow.LightCollection{} // reset the collection - err = 
operation.LookupCollectionPayload(suite.genesis.ID(), &collection.Transactions)(tx) + err = operation.LookupCollectionPayload(r, suite.genesis.ID(), &collection.Transactions) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Payload.Collection.Light(), collection) // should insert header var header flow.Header - err = operation.RetrieveHeader(suite.genesis.ID(), &header)(tx) + err = operation.RetrieveHeader(r, suite.genesis.ID(), &header) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Header.ID(), header.ID()) // should insert block height -> ID lookup var blockID flow.Identifier - err = operation.LookupClusterBlockHeight(suite.genesis.Header.ChainID, suite.genesis.Header.Height, &blockID)(tx) + err = operation.LookupClusterBlockHeight(r, suite.genesis.Header.ChainID, suite.genesis.Header.Height, &blockID) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.ID(), blockID) // should insert boundary var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(suite.genesis.Header.ChainID, &boundary)(tx) + err = operation.RetrieveClusterFinalizedHeight(r, suite.genesis.Header.ChainID, &boundary) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Header.Height, boundary) return nil - }) + })(suite.db.Reader()) suite.Assert().Nil(err) } @@ -342,15 +356,16 @@ func (suite *MutatorSuite) TestExtend_Success() { err := suite.state.Extend(&block) suite.Assert().Nil(err) + r := suite.db.Reader() // should be able to retrieve the block var extended model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(block.ID(), &extended)) + err = procedure.RetrieveClusterBlock(r, block.ID(), &extended) suite.Assert().Nil(err) suite.Assert().Equal(*block.Payload, *extended.Payload) // the block should be indexed by its parent var childIDs flow.IdentifierList - err = suite.db.View(procedure.LookupBlockChildren(suite.genesis.ID(), &childIDs)) + err = procedure.LookupBlockChildren(r, suite.genesis.ID(), &childIDs) suite.Assert().Nil(err) suite.Require().Len(childIDs, 1) suite.Assert().Equal(block.ID(), childIDs[0]) @@ -588,7 +603,7 @@ func (suite *MutatorSuite) TestExtend_LargeHistory() { // conflicting fork, build on the parent of the head parent := head if conflicting { - err = suite.db.View(procedure.RetrieveClusterBlock(parent.Header.ParentID, &parent)) + err = procedure.RetrieveClusterBlock(suite.db.Reader(), parent.Header.ParentID, &parent) assert.NoError(t, err) // add the transaction to the invalidated list invalidatedTransactions = append(invalidatedTransactions, &tx) diff --git a/state/cluster/badger/snapshot.go b/state/cluster/badger/snapshot.go index 7823f700163..11969ed828e 100644 --- a/state/cluster/badger/snapshot.go +++ b/state/cluster/badger/snapshot.go @@ -3,12 +3,11 @@ package badger import ( "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + clusterState "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) // Snapshot represents a snapshot of chain state anchored at a particular @@ -19,35 +18,32 @@ type Snapshot struct { blockID flow.Identifier } +var _ clusterState.Snapshot = (*Snapshot)(nil) + func (s *Snapshot) Collection() (*flow.Collection, error) { if s.err != nil { return nil, s.err } var collection flow.Collection - err := s.state.db.View(func(tx *badger.Txn) error { - - 
// get the header for this snapshot - var header flow.Header - err := s.head(&header)(tx) - if err != nil { - return fmt.Errorf("failed to get snapshot header: %w", err) - } - - // get the payload - var payload cluster.Payload - err = procedure.RetrieveClusterPayload(header.ID(), &payload)(tx) - if err != nil { - return fmt.Errorf("failed to get snapshot payload: %w", err) - } + // get the header for this snapshot + var header flow.Header + err := s.head(&header) + if err != nil { + return nil, fmt.Errorf("failed to get snapshot header: %w", err) + } - // set the collection - collection = payload.Collection + // get the payload + var payload cluster.Payload + err = procedure.RetrieveClusterPayload(s.state.db.Reader(), s.blockID, &payload) + if err != nil { + return nil, fmt.Errorf("failed to get snapshot payload: %w", err) + } - return nil - }) + // set the collection + collection = payload.Collection - return &collection, err + return &collection, nil } func (s *Snapshot) Head() (*flow.Header, error) { @@ -56,9 +52,7 @@ func (s *Snapshot) Head() (*flow.Header, error) { } var head flow.Header - err := s.state.db.View(func(tx *badger.Txn) error { - return s.head(&head)(tx) - }) + err := s.head(&head) return &head, err } @@ -70,23 +64,18 @@ func (s *Snapshot) Pending() ([]flow.Identifier, error) { } // head finds the header referenced by the snapshot. -func (s *Snapshot) head(head *flow.Header) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // get the snapshot header - err := operation.RetrieveHeader(s.blockID, head)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header for block (%s): %w", s.blockID, err) - } - - return nil +func (s *Snapshot) head(head *flow.Header) error { + // get the snapshot header + err := operation.RetrieveHeader(s.state.db.Reader(), s.blockID, head) + if err != nil { + return fmt.Errorf("could not retrieve header for block (%s): %w", s.blockID, err) } + return nil } func (s *Snapshot) pending(blockID flow.Identifier) ([]flow.Identifier, error) { - var pendingIDs flow.IdentifierList - err := s.state.db.View(procedure.LookupBlockChildren(blockID, &pendingIDs)) + err := procedure.LookupBlockChildren(s.state.db.Reader(), blockID, &pendingIDs) if err != nil { return nil, fmt.Errorf("could not get pending children: %w", err) } diff --git a/state/cluster/badger/snapshot_test.go b/state/cluster/badger/snapshot_test.go index 1674043167a..0af5c3deb6f 100644 --- a/state/cluster/badger/snapshot_test.go +++ b/state/cluster/badger/snapshot_test.go @@ -6,7 +6,9 @@ import ( "testing" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" model "github.com/onflow/flow-go/model/cluster" @@ -16,16 +18,21 @@ import ( "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" pbadger "github.com/onflow/flow-go/state/protocol/badger" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) type SnapshotSuite struct { suite.Suite - db *badger.DB - dbdir string + + db storage.DB + badgerdb *badger.DB + dbdir string + 
lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID @@ -44,13 +51,15 @@ func (suite *SnapshotSuite) SetupTest() { suite.chainID = suite.genesis.Header.ChainID suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.badgerdb = unittest.BadgerDB(suite.T(), suite.dbdir) + suite.db = badgerimpl.ToDB(suite.badgerdb) + suite.lockManager = storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := storage.InitAll(metrics, suite.db) - colPayloads := storage.NewClusterPayloads(metrics, suite.db) + all := store.InitAll(metrics, suite.db) + colPayloads := store.NewClusterPayloads(metrics, suite.db) root := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) suite.epochCounter = root.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() @@ -58,12 +67,13 @@ func (suite *SnapshotSuite) SetupTest() { suite.protoState, err = pbadger.Bootstrap( metrics, suite.db, + suite.lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, all.EpochProtocolStateEntries, all.ProtocolKVStore, @@ -74,15 +84,15 @@ func (suite *SnapshotSuite) SetupTest() { clusterStateRoot, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.Require().NoError(err) - clusterState, err := Bootstrap(suite.db, clusterStateRoot) + clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Require().NoError(err) - suite.state, err = NewMutableState(clusterState, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) suite.Require().NoError(err) } // runs after each test finishes func (suite *SnapshotSuite) TearDownTest() { - err := suite.db.Close() + err := suite.badgerdb.Close() suite.Assert().Nil(err) err = os.RemoveAll(suite.dbdir) suite.Assert().Nil(err) @@ -123,7 +133,13 @@ func (suite *SnapshotSuite) Block() model.Block { } func (suite *SnapshotSuite) InsertBlock(block model.Block) { - err := suite.db.Update(procedure.InsertClusterBlock(&block)) + lctx := suite.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + suite.Assert().Nil(err) + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx, rw, &block) + }) suite.Assert().Nil(err) } @@ -210,7 +226,12 @@ func (suite *SnapshotSuite) TestFinalizedBlock() { assert.NoError(t, err) // finalize the block - err = suite.db.Update(procedure.FinalizeClusterBlock(finalizedBlock1.ID())) + lctx := suite.lockManager.NewContext() + defer lctx.Release() + require.NoError(suite.T(), lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err = suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.FinalizeClusterBlock(lctx, rw, finalizedBlock1.ID()) + }) assert.NoError(t, err) // get the final snapshot, should map to finalizedBlock1 @@ -277,7 +298,7 @@ func (suite *SnapshotSuite) TestPending_Grandchildren() { for _, blockID := range pending { var header flow.Header - err := suite.db.View(operation.RetrieveHeader(blockID, &header)) + err := operation.RetrieveHeader(suite.db.Reader(), blockID, &header) suite.Require().Nil(err) // we must have already seen the parent diff --git a/state/cluster/badger/state.go 
b/state/cluster/badger/state.go index f088328823e..447dddefcf9 100644 --- a/state/cluster/badger/state.go +++ b/state/cluster/badger/state.go @@ -4,19 +4,19 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) type State struct { - db *badger.DB + db storage.DB clusterID flow.ChainID // the chain ID for the cluster epoch uint64 // the operating epoch for the cluster } @@ -24,7 +24,13 @@ type State struct { // Bootstrap initializes the persistent cluster state with a genesis block. // The genesis block must have height 0, a parent hash of 32 zero bytes, // and an empty collection as payload. -func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { +func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot) (*State, error) { + lctx := lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertOrFinalizeClusterBlock` for inserting cluster block: %w", err) + } isBootstrapped, err := IsBootstrapped(db, stateRoot.ClusterID()) if err != nil { return nil, fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) @@ -36,21 +42,22 @@ func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { genesis := stateRoot.Block() rootQC := stateRoot.QC() + // bootstrap cluster state - err = operation.RetryOnConflict(state.db.Update, func(tx *badger.Txn) error { + err = state.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { chainID := genesis.Header.ChainID // insert the block - err := procedure.InsertClusterBlock(genesis)(tx) + err := procedure.InsertClusterBlock(lctx, rw, genesis) if err != nil { return fmt.Errorf("could not insert genesis block: %w", err) } // insert block height -> ID mapping - err = operation.IndexClusterBlockHeight(chainID, genesis.Header.Height, genesis.ID())(tx) + err = operation.IndexClusterBlockHeight(lctx, rw.Writer(), chainID, genesis.Header.Height, genesis.ID()) if err != nil { return fmt.Errorf("failed to map genesis block height to block: %w", err) } // insert boundary - err = operation.InsertClusterFinalizedHeight(chainID, genesis.Header.Height)(tx) + err = operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), chainID, genesis.Header.Height) // insert started view for hotstuff if err != nil { return fmt.Errorf("could not insert genesis boundary: %w", err) @@ -66,12 +73,12 @@ func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { NewestQC: rootQC, } // insert safety data - err = operation.InsertSafetyData(chainID, safetyData)(tx) + err = operation.UpsertSafetyData(rw.Writer(), chainID, safetyData) if err != nil { return fmt.Errorf("could not insert safety data: %w", err) } // insert liveness data - err = operation.InsertLivenessData(chainID, livenessData)(tx) + err = operation.UpsertLivenessData(rw.Writer(), chainID, livenessData) if err != nil { return fmt.Errorf("could not insert liveness data: %w", err) } @@ -85,7 +92,7 @@ func Bootstrap(db *badger.DB, stateRoot *StateRoot) 
(*State, error) { return state, nil } -func OpenState(db *badger.DB, _ module.Tracer, _ storage.Headers, _ storage.ClusterPayloads, clusterID flow.ChainID, epoch uint64) (*State, error) { +func OpenState(db storage.DB, _ module.Tracer, _ storage.Headers, _ storage.ClusterPayloads, clusterID flow.ChainID, epoch uint64) (*State, error) { isBootstrapped, err := IsBootstrapped(db, clusterID) if err != nil { return nil, fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) @@ -97,7 +104,7 @@ func OpenState(db *badger.DB, _ module.Tracer, _ storage.Headers, _ storage.Clus return state, nil } -func newState(db *badger.DB, clusterID flow.ChainID, epoch uint64) *State { +func newState(db storage.DB, clusterID flow.ChainID, epoch uint64) *State { state := &State{ db: db, clusterID: clusterID, @@ -116,20 +123,21 @@ func (s *State) Params() cluster.Params { func (s *State) Final() cluster.Snapshot { // get the finalized block ID var blockID flow.Identifier - err := s.db.View(func(tx *badger.Txn) error { + err := (func(r storage.Reader) error { var boundary uint64 - err := operation.RetrieveClusterFinalizedHeight(s.clusterID, &boundary)(tx) + err := operation.RetrieveClusterFinalizedHeight(r, s.clusterID, &boundary) if err != nil { return fmt.Errorf("could not retrieve finalized boundary: %w", err) } - err = operation.LookupClusterBlockHeight(s.clusterID, boundary, &blockID)(tx) + err = operation.LookupClusterBlockHeight(r, s.clusterID, boundary, &blockID) if err != nil { return fmt.Errorf("could not retrieve finalized ID: %w", err) } return nil - }) + })(s.db.Reader()) + if err != nil { return &Snapshot{ err: err, @@ -152,9 +160,9 @@ func (s *State) AtBlockID(blockID flow.Identifier) cluster.Snapshot { } // IsBootstrapped returns whether the database contains a bootstrapped state. -func IsBootstrapped(db *badger.DB, clusterID flow.ChainID) (bool, error) { +func IsBootstrapped(db storage.DB, clusterID flow.ChainID) (bool, error) { var finalized uint64 - err := db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &finalized)) + err := operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &finalized) if errors.Is(err, storage.ErrNotFound) { return false, nil } diff --git a/state/cluster/snapshot.go b/state/cluster/snapshot.go index c69b73844eb..4472ddaa358 100644 --- a/state/cluster/snapshot.go +++ b/state/cluster/snapshot.go @@ -18,7 +18,7 @@ type Snapshot interface { // latest finalized block. Head() (*flow.Header, error) - // Pending returns all children IDs for the snapshot head, which thus were + // Pending returns the IDs of all blocks descending from the snapshot head, which thus were // potential extensions of the protocol state at this snapshot. The result // is ordered such that parents are included before their children. These // are NOT guaranteed to have been validated by HotStuff. 
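The cluster-state hunks above all follow the same migration: replace Badger transactions with a lockctx-guarded atomic batch write. A condensed sketch of that pattern, assuming the lockctx and storage signatures exactly as they appear in the hunks (the wrapper function itself is illustrative):

```go
package example

import (
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation"
	"github.com/onflow/flow-go/storage/procedure"
)

// finalizeClusterBlock shows the lock-then-batch-write pattern used throughout this diff:
// acquire the named lock, hold it for the duration of the write, and apply all mutations
// atomically via one reader/batch-writer.
func finalizeClusterBlock(db storage.DB, lockManager lockctx.Manager, refHeight uint64, blockID flow.Identifier) error {
	lctx := lockManager.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock); err != nil {
		return fmt.Errorf("could not acquire cluster block lock: %w", err)
	}
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		if err := procedure.FinalizeClusterBlock(lctx, rw, blockID); err != nil {
			return fmt.Errorf("could not finalize cluster block %v: %w", blockID, err)
		}
		return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refHeight, blockID)
	})
}
```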
diff --git a/state/protocol/badger/mutator.go b/state/protocol/badger/mutator.go index bdbdac11b8f..90ca605628a 100644 --- a/state/protocol/badger/mutator.go +++ b/state/protocol/badger/mutator.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -18,9 +18,9 @@ import ( "github.com/onflow/flow-go/state/protocol" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) // FollowerState implements a lighter version of a mutable protocol state. @@ -51,6 +51,7 @@ var _ protocol.FollowerState = (*FollowerState)(nil) // state with a new block, by checking the _entire_ block payload. type ParticipantState struct { *FollowerState + receiptValidator module.ReceiptValidator sealValidator module.SealValidator } @@ -125,7 +126,7 @@ func NewFullConsensusState( } // ExtendCertified extends the protocol state of a CONSENSUS FOLLOWER. While it checks -// the validity of the header; it does _not_ check the validity of the payload. +// the validity of the header, it does _not_ check the validity of the payload. // Instead, the consensus follower relies on the consensus participants to // validate the full payload. Payload validity can be proved by a valid quorum certificate. // Certifying QC must match candidate block: @@ -134,7 +135,9 @@ func NewFullConsensusState( // // CAUTION: // - This function expects that `certifyingQC ` has been validated. (otherwise, the state will be corrupted) -// - The parent block must already have been ingested. +// - The PARENT block must already have been INGESTED. +// - Attempts to extend the state with the _same block concurrently_ are not allowed. +// (will not corrupt the state, but may lead to an exception) // // Per convention, the protocol state requires that the candidate's parent has already been ingested. // Other than that, all valid extensions are accepted. Even if we have enough information to determine that @@ -147,7 +150,16 @@ func NewFullConsensusState( // determine it is orphaned and drop it, attempt to ingest Y re-request the unknown parent X and repeat // potentially very often. // +// To ensure that all ancestors of a candidate block are correct and known to the FollowerState, some external +// ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block +// is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the +// Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before +// the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests +// with the same block. Hence, for simplicity, the FollowerState may reject such requests with an exception. +// // No errors are expected during normal operations. +// - In case of concurrent calls with the same `candidate` block, ExtendCertified may return a [storage.ErrAlreadyExists] +// or it may gracefully return. At the moment, ExtendCertified should be considered as not concurrency-safe. 
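The contract spelled out above — repeated ingestion of an already-processed block is a no-op, but a concurrent duplicate may surface `storage.ErrAlreadyExists` — suggests the following hypothetical caller-side handling. This is a hedged sketch of how a caller might classify that sentinel, not how the compliance layer actually behaves:

```go
import (
	"context"
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/storage"
)

// ingestCertified extends the follower state and treats storage.ErrAlreadyExists from a
// concurrent duplicate ingestion as benign, per the documentation of ExtendCertified above.
func ingestCertified(ctx context.Context, st protocol.FollowerState, block *flow.Block, qc *flow.QuorumCertificate) error {
	err := st.ExtendCertified(ctx, block, qc)
	if errors.Is(err, storage.ErrAlreadyExists) {
		// another worker persisted the same certified block first; nothing left to do
		return nil
	}
	if err != nil {
		// no other benign errors are documented; anything else is an exception
		return fmt.Errorf("failed to extend follower state with block %v: %w", block.ID(), err)
	}
	return nil
}
```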
func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate) error { span, ctx := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorHeaderExtend) defer span.End() @@ -158,7 +170,6 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo if err != nil || isDuplicate { return err } - deferredDbOps := transaction.NewDeferredDbOps() // sanity check if certifyingQC actually certifies candidate block if certifyingQC.View != candidate.Header.View { @@ -168,8 +179,9 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo return fmt.Errorf("qc doesn't certify candidate block, expect %x blockID, got %x", blockID, certifyingQC.BlockID) } + deferredBlockPersist := deferred.NewDeferredBlockPersist() // check if the block header is a valid extension of parent block - err = m.headerExtend(ctx, candidate, certifyingQC, deferredDbOps) + err = m.headerExtend(ctx, candidate, certifyingQC, deferredBlockPersist) if err != nil { // since we have a QC for this block, it cannot be an invalid extension return fmt.Errorf("unexpected invalid block (id=%x) with certifying qc (id=%x): %s", @@ -177,65 +189,87 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo } // find the last seal at the parent block - _, err = m.lastSealed(candidate, deferredDbOps) + latestSeal, err := m.lastSealed(candidate) if err != nil { return fmt.Errorf("failed to determine the lastest sealed block in fork: %w", err) } + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, latestSeal.ID()) + }) + // TODO: we might not need the deferred db updates, because the candidate passed into + // the Extend method has already been fully constructed. // evolve protocol state and verify consistency with commitment included in - err = m.evolveProtocolState(ctx, candidate, deferredDbOps) + err = m.evolveProtocolState(ctx, candidate, deferredBlockPersist) if err != nil { return fmt.Errorf("evolving protocol state failed: %w", err) } - // Execute the deferred database operations as one atomic transaction and emit scheduled notifications on success. - // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! - err = operation.RetryOnConflictTx(m.db, transaction.Update, deferredDbOps.Pending()) // No errors are expected during normal operations + lctx := m.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertBlock) if err != nil { - return fmt.Errorf("failed to persist candidate block %v and its dependencies: %w", blockID, err) + return err } - return nil + // Execute the deferred database operations as one atomic transaction and emit scheduled notifications on success. + // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! + // + // Note: The following database write is not concurrency-safe at the moment. If a candidate block is + // identified as a duplicate by `checkBlockAlreadyProcessed` in the beginning, `Extend` behaves as a no-op and + // gracefully returns. However, if two concurrent `Extend` calls with the same block pass the initial check + // for duplicates, both will eventually attempt to commit their deferred database operations. As documented + // in `headerExtend`, its deferred operations will abort the write batch with [storage.ErrAlreadyExists]. 
+ // In this edge case of two concurrent calls with the same `candidate` block, `Extend` does not behave as + // an idempotent operation. + return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return deferredBlockPersist.Execute(lctx, blockID, rw) + }) } // Extend extends the protocol state of a CONSENSUS PARTICIPANT. It checks // the validity of the _entire block_ (header and full payload). // -// CAUTION: per convention, the protocol state requires that the candidate's -// parent has already been ingested. Otherwise, an exception is returned. +// CAUTION: +// - per convention, the protocol state requires that the candidate's +// PARENT has already been INGESTED. Otherwise, an exception is returned. +// - Attempts to extend the state with the _same block concurrently_ are not allowed. +// (will not corrupt the state, but may lead to an exception) +// - We reject orphaned blocks with [state.OutdatedExtensionError] ! +// This is more performant, but requires careful handling by the calling code. Specifically, +// the caller should not just drop orphaned blocks from the cache to avoid wasteful re-requests. +// If we were to entirely forget orphaned blocks, e.g. block X of the orphaned fork X ← Y ← Z, +// we might not have enough information to reject blocks Y, Z later if we receive them. We would +// re-request X, then determine it is orphaned and drop it, attempt to ingest Y re-request the +// unknown parent X and repeat potentially very often. // -// Per convention, the protocol state requires that the candidate's parent has already been ingested. -// Other than that, all valid extensions are accepted. Even if we have enough information to determine that -// a candidate block is already orphaned (e.g. its view is below the latest finalized view), it is important -// to accept it nevertheless to avoid spamming vulnerabilities. If a block is orphaned, consensus rules -// guarantee that there exists only a limited number of descendants which cannot increase anymore. So there -// is only a finite (generally small) amount of work to do accepting orphaned blocks and all their descendants. -// However, if we were to drop orphaned blocks, e.g. block X of the orphaned fork X <- Y <- Z, we might not -// have enough information to reject blocks Y, Z later if we receive them. We would re-request X, then -// determine it is orphaned and drop it, attempt to ingest Y re-request the unknown parent X and repeat -// potentially very often. +// To ensure that all ancestors of a candidate block are correct and known to the Protocol State, some external +// ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block +// is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the +// Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before +// the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests +// with the same block. Hence, for simplicity, the Protocol State may reject such requests with an exception. // // Expected errors during normal operations: -// - state.OutdatedExtensionError if the candidate block is outdated (e.g. 
orphaned) +// - [state.OutdatedExtensionError] if the candidate block is orphaned // - state.InvalidExtensionError if the candidate block is invalid +// - In case of concurrent calls with the same `candidate` block, `Extend` may return a [storage.ErrAlreadyExists] +// or it may gracefully return. At the moment, `Extend` should be considered as not concurrency-safe. func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) error { span, ctx := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtend) defer span.End() // check if candidate block has been already processed - isDuplicate, err := m.checkBlockAlreadyProcessed(candidate.ID()) + blockID := candidate.ID() + isDuplicate, err := m.checkBlockAlreadyProcessed(blockID) if err != nil || isDuplicate { return err } - deferredDbOps := transaction.NewDeferredDbOps() - - // check if the block header is a valid extension of parent block - err = m.headerExtend(ctx, candidate, nil, deferredDbOps) - if err != nil { - return fmt.Errorf("header not compliant with chain state: %w", err) - } - // check if the block header is a valid extension of the finalized state + // The following function rejects the input block with an [state.OutdatedExtensionError] if and only if + // the block is orphaned or already finalized. If the block was to be finalized already, it would have been + // detected as already processed by the check above. Hence, `candidate` being orphaned is the only + // possible case to receive an [state.OutdatedExtensionError] here. err = m.checkOutdatedExtension(candidate.Header) if err != nil { if state.IsOutdatedExtensionError(err) { @@ -244,6 +278,14 @@ func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) er return fmt.Errorf("could not check if block is an outdated extension: %w", err) } + deferredBlockPersist := deferred.NewDeferredBlockPersist() + + // check if the block header is a valid extension of parent block + err = m.headerExtend(ctx, candidate, nil, deferredBlockPersist) + if err != nil { + return fmt.Errorf("header not compliant with chain state: %w", err) + } + // check if the guarantees in the payload is a valid extension of the finalized state err = m.guaranteeExtend(ctx, candidate) if err != nil { @@ -257,24 +299,37 @@ func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) er } // check if the seals in the payload is a valid extension of the finalized state - _, err = m.sealExtend(ctx, candidate, deferredDbOps) + _, err = m.sealExtend(ctx, candidate, deferredBlockPersist) if err != nil { return fmt.Errorf("payload seal(s) not compliant with chain state: %w", err) } // evolve protocol state and verify consistency with commitment included in payload - err = m.evolveProtocolState(ctx, candidate, deferredDbOps) + err = m.evolveProtocolState(ctx, candidate, deferredBlockPersist) if err != nil { return fmt.Errorf("evolving protocol state failed: %w", err) } - // Execute the deferred database operations and emit scheduled notifications on success. - // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! 
- err = operation.RetryOnConflictTx(m.db, transaction.Update, deferredDbOps.Pending()) // No errors are expected during normal operations + lctx := m.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertBlock) if err != nil { - return fmt.Errorf("failed to persist candiate block %v and its dependencies: %w", candidate.ID(), err) + return err } - return nil + + // Execute the deferred database operations and emit scheduled notifications on success. + // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! + // + // Note: The following database write is not concurrency-safe at the moment. If a candidate block is + // identified as a duplicate by `checkBlockAlreadyProcessed` in the beginning, `Extend` behaves as a no-op and + // gracefully returns. However, if two concurrent `Extend` calls with the same block pass the initial check + // for duplicates, both will eventually attempt to commit their deferred database operations. As documented + // in `headerExtend`, its deferred operations will abort the write batch with [storage.ErrAlreadyExists]. + // In this edge case of two concurrent calls with the same `candidate` block, `Extend` does not behave as + // an idempotent operation. + return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return deferredBlockPersist.Execute(lctx, blockID, rw) + }) } // headerExtend verifies the validity of the block header (excluding verification of the @@ -290,17 +345,27 @@ func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) er // If all checks pass, this method queues the following operations to persist the candidate block and // schedules `BlockProcessable` notification to be emitted in order of increasing height: // -// 5a. store QC embedded into the candidate block and emit `BlockProcessable` notification for the parent +// 5a. if and only if the candidate block's parent has not been certified yet: +// - store QC embedded into the candidate block +// - add the parent to the index of certified blocks (index: view → parent block's ID) +// - queue a `BlockProcessable` notification for the parent // 5b. store candidate block and index it as a child of its parent (needed for recovery to traverse unfinalized blocks) -// 5c. if we are given a certifyingQC, store it and queue a `BlockProcessable` notification for the candidate block +// 5c. if and only if we are given a `certifyingQC` +// - store this QC certifying the candidate block +// - add candidate to the index of certified blocks (index: view → candidate block's ID) +// - queue a `BlockProcessable` notification for the candidate block // // If `headerExtend` is called by `ParticipantState.Extend` (full consensus participant) then `certifyingQC` will be nil, // but the block payload will be validated. If `headerExtend` is called by `FollowerState.Extend` (consensus follower), -// then `certifyingQC` must be not nil which proves payload validity. +// then `certifyingQC` must be not nil, which proves payload validity. +// +// If the candidate block has already been ingested, the deferred database operations returned by this function call +// will error with the benign sentinel [storage.ErrAlreadyExists], aborting the database transaction (without corrupting +// the protocol state). 
// // Expected errors during normal operations: // - state.InvalidExtensionError if the candidate block is invalid -func (m *FollowerState) headerExtend(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate, deferredDbOps *transaction.DeferredDbOps) error { +func (m *FollowerState) headerExtend(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate, deferredBlockPersist *deferred.DeferredBlockPersist) error { span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckHeader) defer span.End() blockID := candidate.ID() @@ -353,43 +418,58 @@ func (m *FollowerState) headerExtend(ctx context.Context, candidate *flow.Block, } // STEP 5: - qc := candidate.Header.QuorumCertificate() - deferredDbOps.AddDbOp(func(tx *transaction.Tx) error { - // STEP 5a: Store QC for parent block and emit `BlockProcessable` notification if and only if - // - the QC for the parent has not been stored before (otherwise, we already emitted the notification) and - // - the parent block's height is larger than the finalized root height (the root block is already considered processed) - // Thereby, we reduce duplicated `BlockProcessable` notifications. - err := m.qcs.StoreTx(qc)(tx) + qc := candidate.Header.ParentQC() + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // STEP 5a: Deciding whether the candidate's parent has already been certified or not. + // Here, we populate the [storage.QuorumCertificates] index: certified block ID → QC. Except for bootstrapping, this is the + // only place where this index is updated. Therefore, the parent is certified if and only if [storage.QuorumCertificates] + // contains an entry for `qc.BlockID`. We optimistically attempt to add a new element to the index. We receive a + // [storage.ErrAlreadyExists] sentinel if and only if step 5a has already been executed for the parent. + err = m.qcs.BatchStore(lctx, rw, qc) if err != nil { + // [storage.ErrAlreadyExists] guarantees that 5a has already been executed for the parent. 
if !errors.Is(err, storage.ErrAlreadyExists) { return fmt.Errorf("could not store incorporated qc: %w", err) } - } else { + } else { // no error entails that 5a has never been executed for the parent block + // add parent to index of certified blocks: + err := operation.IndexCertifiedBlockByView(lctx, rw, parent.View, qc.BlockID) + if err != nil { + return fmt.Errorf("could not index certified block by view %v: %w", parent.View, err) + } + // trigger BlockProcessable for parent block above root height if parent.Height > m.finalizedRootHeight { - tx.OnSucceed(func() { + storage.OnCommitSucceed(rw, func() { m.consumer.BlockProcessable(parent, qc) }) } } // STEP 5b: Store candidate block and index it as a child of its parent (needed for recovery to traverse unfinalized blocks) - err = m.blocks.StoreTx(candidate)(tx) // insert the block into the database AND cache + err = m.blocks.BatchStore(lctx, rw, candidate) // insert the block into the database AND cache if err != nil { return fmt.Errorf("could not store candidate block: %w", err) } - err = transaction.WithTx(procedure.IndexNewBlock(blockID, candidate.Header.ParentID))(tx) + err = procedure.IndexNewBlock(lctx, rw, blockID, candidate.Header.ParentID) if err != nil { return fmt.Errorf("could not index new block: %w", err) } // STEP 5c: if we are given a certifyingQC, store it and queue a `BlockProcessable` notification for the candidate block if certifyingQC != nil { - err = m.qcs.StoreTx(certifyingQC)(tx) + err = m.qcs.BatchStore(lctx, rw, certifyingQC) if err != nil { return fmt.Errorf("could not store certifying qc: %w", err) } - tx.OnSucceed(func() { // queue a BlockProcessable event for candidate block, since it is certified + + // add candidate block to index of certified blocks: + err := operation.IndexCertifiedBlockByView(lctx, rw, candidate.Header.View, blockID) + if err != nil { + return fmt.Errorf("could not index certified block by view %v: %w", candidate.Header.View, err) + } + + storage.OnCommitSucceed(rw, func() { // queue a BlockProcessable event for candidate block, since it is certified m.consumer.BlockProcessable(candidate.Header, certifyingQC) }) } @@ -416,40 +496,61 @@ func (m *FollowerState) checkBlockAlreadyProcessed(blockID flow.Identifier) (boo return true, nil } -// checkOutdatedExtension checks whether given block is -// valid in the context of the entire state. For this, the block needs to -// directly connect, through its ancestors, to the last finalized block. +// checkOutdatedExtension rejects blocks that are either orphaned or already finalized, in which cases +// the sentinel [state.OutdatedExtensionError] is returned. Per convention, the ancestor blocks +// for any ingested block must be known (otherwise, we return an exception). +// +// APPROACH: +// Starting with `block`s parent, we walk the fork backwards in order of decreasing height. Eventually, +// we will reach a finalized block (this is always true, because a node starts with the genesis block +// or a root block that is known to be finalized and only accepts blocks that descend from this block). +// Let H denote the *latest* finalized height (in the implementation below called `finalizedHeight`). +// +// For `block.Height` > H, there are two cases: +// 1. When walking the fork backward, we reach the *latest* finalized block. Hence, `block` +// descends from the latest finalized block, i.e. it is not orphaned (yet). +// 2. We encounter a block at height H that is different from the latest finalized block. 
+// Therefore, our fork contains a block at height H that conflicts with the latest +// finalized block. Hence, `block` is orphaned. +// Example: +// A (Finalized) ← B (Finalized) ← C (Finalized) ← D ← E ← F +// ↖ G ↖ H ↖ I +// Block G is outdated, because its ancestry does not include C (latest finalized). +// Block H and I are not outdated, because they do have C as an ancestor. +// +// For `block.Height` ≤ H: +// - We emphasize that the traversal starts with `block`'s *parent*. Hence, the first block we +// visit when traversing the fork is at height `block.Height - 1` < H. Also in this case, our +// traversal reaches height H or below, _without_ encountering the latest finalized block. +// +// In summary, in the context of this function, we define a `block` to be OUTDATED if and only if +// `block` is orphaned or already finalized. +// // Expected errors during normal operations: -// - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) -func (m *ParticipantState) checkOutdatedExtension(header *flow.Header) error { - var finalizedHeight uint64 - err := m.db.View(operation.RetrieveFinalizedHeight(&finalizedHeight)) +// - [state.OutdatedExtensionError] if the candidate block is orphaned or finalized +func (m *ParticipantState) checkOutdatedExtension(block *flow.Header) error { + var latestFinalizedHeight uint64 + err := operation.RetrieveFinalizedHeight(m.db.Reader(), &latestFinalizedHeight) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } var finalID flow.Identifier - err = m.db.View(operation.LookupBlockHeight(finalizedHeight, &finalID)) + err = operation.LookupBlockHeight(m.db.Reader(), latestFinalizedHeight, &finalID) if err != nil { return fmt.Errorf("could not lookup finalized block: %w", err) } - ancestorID := header.ParentID + ancestorID := block.ParentID for ancestorID != finalID { ancestor, err := m.headers.ByBlockID(ancestorID) if err != nil { - return fmt.Errorf("could not retrieve ancestor (%x): %w", ancestorID, err) - } - if ancestor.Height < finalizedHeight { - // this happens when the candidate block is on a fork that does not include all the - // finalized blocks. - // for instance: - // A (Finalized) <- B (Finalized) <- C (Finalized) <- D <- E <- F - // ^- G ^- H ^- I - // block G is not a valid block, because it does not have C (which has been finalized) as an ancestor - // block H and I are valid, because they do have C as an ancestor + return irrecoverable.NewExceptionf("could not retrieve ancestor %x: %w", ancestorID, err) + } + if ancestor.Height < latestFinalizedHeight { + // Candidate block is on a fork that does not include the latest finalized block. return state.NewOutdatedExtensionErrorf( "candidate block (height: %d) conflicts with finalized state (ancestor: %d final: %d)", - header.Height, ancestor.Height, finalizedHeight) + block.Height, ancestor.Height, latestFinalizedHeight) } ancestorID = ancestor.ParentID } @@ -543,7 +644,7 @@ func (m *ParticipantState) guaranteeExtend(ctx context.Context, candidate *flow. // operation for indexing the latest seal as of the candidate block and returns the latest seal. 
// Expected errors during normal operations: // - state.InvalidExtensionError if the candidate block has invalid seals -func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block, deferredDbOps *transaction.DeferredDbOps) (*flow.Seal, error) { +func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block, deferredBlockPersist *deferred.DeferredBlockPersist) (*flow.Seal, error) { span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckSeals) defer span.End() @@ -552,7 +653,10 @@ func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block return nil, state.NewInvalidExtensionErrorf("seal validation error: %w", err) } - deferredDbOps.AddBadgerOp(operation.IndexLatestSealAtBlock(candidate.ID(), lastSeal.ID())) + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, lastSeal.ID()) + }) + return lastSeal, nil } @@ -585,19 +689,16 @@ func (m *ParticipantState) receiptExtend(ctx context.Context, candidate *flow.Bl return nil } -// lastSealed determines the highest sealed block from the fork with head `candidate`. -// It queues a deferred database operation for indexing the latest seal as of the candidate block. -// and returns the latest seal. +// lastSealed returns the highest sealed block from the fork with head `candidate`. // // For instance, here is the chain state: block 100 is the head, block 97 is finalized, // and 95 is the last sealed block at the state of block 100. // 95 (sealed) <- 96 <- 97 (finalized) <- 98 <- 99 <- 100 // Now, if block 101 is extending block 100, and its payload has a seal for 96, then it will -// be the last sealed for block 101. +// be the last sealed as of block 101. The result is independent of finalization. // No errors are expected during normal operation. -func (m *FollowerState) lastSealed(candidate *flow.Block, deferredDbOps *transaction.DeferredDbOps) (latestSeal *flow.Seal, err error) { +func (m *FollowerState) lastSealed(candidate *flow.Block) (latestSeal *flow.Seal, err error) { payload := candidate.Payload - blockID := candidate.ID() // If the candidate blocks' payload has no seals, the latest seal in this fork remains unchanged, i.e. latest seal as of the // parent is also the latest seal as of the candidate block. Otherwise, we take the latest seal included in the candidate block. @@ -622,7 +723,6 @@ func (m *FollowerState) lastSealed(candidate *flow.Block, deferredDbOps *transac latestSeal = ordered[len(ordered)-1] } - deferredDbOps.AddBadgerOp(operation.IndexLatestSealAtBlock(blockID, latestSeal.ID())) return latestSeal, nil } @@ -634,13 +734,13 @@ func (m *FollowerState) lastSealed(candidate *flow.Block, deferredDbOps *transac // Expected errors during normal operations: // - state.InvalidExtensionError if the Protocol State commitment in the candidate block does // not match the Protocol State we constructed locally -func (m *FollowerState) evolveProtocolState(ctx context.Context, candidate *flow.Block, deferredDbOps *transaction.DeferredDbOps) error { +func (m *FollowerState) evolveProtocolState(ctx context.Context, candidate *flow.Block, deferredBlockPersist *deferred.DeferredBlockPersist) error { span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorEvolveProtocolState) defer span.End() // Evolve the Protocol State starting from the parent block's state. 
Information that may change the state is: // the candidate block's view and Service Events from execution results sealed in the candidate block. - updatedStateID, dbUpdates, err := m.protocolState.EvolveState(candidate.Header.ParentID, candidate.Header.View, candidate.Payload.Seals) + updatedStateID, err := m.protocolState.EvolveState(deferredBlockPersist, candidate.Header.ParentID, candidate.Header.View, candidate.Payload.Seals) if err != nil { return fmt.Errorf("evolving protocol state failed: %w", err) } @@ -649,7 +749,7 @@ func (m *FollowerState) evolveProtocolState(ctx context.Context, candidate *flow if updatedStateID != candidate.Payload.ProtocolStateID { return state.NewInvalidExtensionErrorf("invalid protocol state commitment %x in block, which should be %x", candidate.Payload.ProtocolStateID, updatedStateID) } - deferredDbOps.AddDbOps(dbUpdates.Pending().WithBlock(candidate.ID())) + return nil } @@ -658,6 +758,13 @@ func (m *FollowerState) evolveProtocolState(ctx context.Context, candidate *flow // Hence, the parent of `blockID` has to be the last finalized block. // No errors are expected during normal operations. func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) error { + lctx := m.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + if err != nil { + return err + } + // preliminaries: start tracer and retrieve full block span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorFinalize) defer span.End() @@ -677,12 +784,12 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // this must be the case, as the `Finalize` method only finalizes one block // at a time and hence the parent of `blockID` must already be finalized. var finalized uint64 - err = m.db.View(operation.RetrieveFinalizedHeight(&finalized)) + err = operation.RetrieveFinalizedHeight(m.db.Reader(), &finalized) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } var finalID flow.Identifier - err = m.db.View(operation.LookupBlockHeight(finalized, &finalID)) + err = operation.LookupBlockHeight(m.db.Reader(), finalized, &finalID) if err != nil { return fmt.Errorf("could not retrieve final header: %w", err) } @@ -734,22 +841,22 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // This value could actually stay the same if it has no seals in // its payload, in which case the parent's seal is the same. 
// * set the epoch fallback flag, if it is triggered - err = operation.RetryOnConflict(m.db.Update, func(tx *badger.Txn) error { - err = operation.IndexBlockHeight(header.Height, blockID)(tx) + err = m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err = operation.IndexFinalizedBlockByHeight(lctx, rw, header.Height, blockID) if err != nil { return fmt.Errorf("could not insert number mapping: %w", err) } - err = operation.UpdateFinalizedHeight(header.Height)(tx) + err = operation.UpsertFinalizedHeight(lctx, rw.Writer(), header.Height) if err != nil { return fmt.Errorf("could not update finalized height: %w", err) } - err = operation.UpdateSealedHeight(sealed.Height)(tx) + err = operation.UpsertSealedHeight(lctx, rw.Writer(), sealed.Height) if err != nil { return fmt.Errorf("could not update sealed height: %w", err) } if isFirstBlockOfEpoch(parentEpochState, finalizingEpochState) { - err = operation.InsertEpochFirstHeight(currentEpochSetup.Counter, header.Height)(tx) + err = operation.InsertEpochFirstHeight(lctx, rw, currentEpochSetup.Counter, header.Height) if err != nil { return fmt.Errorf("could not insert epoch first block height: %w", err) } @@ -759,7 +866,7 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // guarantees that only a single, continuous execution fork is sealed. Here, we index for // each block ID the ID of its _finalized_ seal. for _, seal := range block.Payload.Seals { - err = operation.IndexFinalizedSealByBlockID(seal.BlockID, seal.ID())(tx) + err = operation.IndexFinalizedSealByBlockID(rw.Writer(), seal.BlockID, seal.ID()) if err != nil { return fmt.Errorf("could not index the seal by the sealed block ID: %w", err) } @@ -768,7 +875,7 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e if len(versionBeacons) > 0 { // only index the last version beacon as that is the relevant one. // TODO: The other version beacons can be used for validation. 
- err := operation.IndexVersionBeaconByHeight(versionBeacons[len(versionBeacons)-1])(tx) + err := operation.IndexVersionBeaconByHeight(rw.Writer(), versionBeacons[len(versionBeacons)-1]) if err != nil { return fmt.Errorf("could not index version beacon or height (%d): %w", header.Height, err) } diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index a41e1959318..6eb333290c1 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -34,9 +34,9 @@ import ( protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" - stoerr "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" @@ -48,28 +48,31 @@ func TestBootstrapValid(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *protocol.State) { var finalized uint64 - err := db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(badgerimpl.ToDB(db).Reader(), &finalized) require.NoError(t, err) var sealed uint64 - err = db.View(operation.RetrieveSealedHeight(&sealed)) + bdb := badgerimpl.ToDB(db) + err = operation.RetrieveSealedHeight(bdb.Reader(), &sealed) require.NoError(t, err) var genesisID flow.Identifier - err = db.View(operation.LookupBlockHeight(0, &genesisID)) + err = operation.LookupBlockHeight(badgerimpl.ToDB(db).Reader(), 0, &genesisID) require.NoError(t, err) var header flow.Header - err = db.View(operation.RetrieveHeader(genesisID, &header)) + err = operation.RetrieveHeader(badgerimpl.ToDB(db).Reader(), genesisID, &header) require.NoError(t, err) + storagedb := badgerimpl.ToDB(db) + var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(genesisID, &sealID)) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), genesisID, &sealID) require.NoError(t, err) _, seal, err := rootSnapshot.SealedResult() require.NoError(t, err) - err = db.View(operation.RetrieveSeal(sealID, seal)) + err = operation.RetrieveSeal(storagedb.Reader(), sealID, seal) require.NoError(t, err) block, err := rootSnapshot.Head() @@ -87,6 +90,7 @@ func TestBootstrapValid(t *testing.T) { // * BlockProcessable is emitted when a block's child is inserted func TestExtendValid(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -103,7 +107,8 @@ func TestExtendValid(t *testing.T) { state, err := protocol.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -150,6 +155,15 @@ func TestExtendValid(t *testing.T) { consumer.On("BlockProcessable", block1.Header, mock.Anything).Once() err := fullState.Extend(context.Background(), block2) require.NoError(t, err) + + // verify that block1's view is indexed as certified, because it has a child (block2) + var indexedID flow.Identifier + require.NoError(t, operation.LookupCertifiedBlockByView(badgerimpl.ToDB(db).Reader(), block1.Header.View, &indexedID)) + 
require.Equal(t, block1.ID(), indexedID)
+
+		// verify that block2's view is not indexed as certified, because it has no children
+		err = operation.LookupCertifiedBlockByView(badgerimpl.ToDB(db).Reader(), block2.Header.View, &indexedID)
+		require.ErrorIs(t, err, storage.ErrNotFound)
 		})
 	})
 }
@@ -238,7 +252,8 @@ func TestSealedIndex(t *testing.T) {
 		require.NoError(t, err)

 		metrics := metrics.NewNoopCollector()
-		seals := bstorage.NewSeals(metrics, db)
+		storedb := badgerimpl.ToDB(db)
+		seals := store.NewSeals(metrics, storedb)

 		// can only find seal for G
 		_, err = seals.FinalizedSealForBlock(rootHeader.ID())
@@ -534,44 +549,54 @@ func TestExtendMissingParent(t *testing.T) {
 		require.False(t, st.IsInvalidExtensionError(err), err)
 		require.False(t, st.IsOutdatedExtensionError(err), err)

+		storagedb := badgerimpl.ToDB(db)
+
 		// verify seal that was contained in candidate block is not indexed
 		var sealID flow.Identifier
-		err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID))
+		err = operation.LookupLatestSealAtBlock(storagedb.Reader(), extend.ID(), &sealID)
 		require.Error(t, err)
-		require.ErrorIs(t, err, stoerr.ErrNotFound)
+		require.ErrorIs(t, err, storage.ErrNotFound)
 	})
 }

+// TestExtendHeightTooSmall tests the behaviour when attempting to extend the protocol state by a block
+// whose height is not larger than its parent's height. The protocol mandates that the candidate's
+// height is exactly one larger than its parent's height. Otherwise, an exception should be returned.
 func TestExtendHeightTooSmall(t *testing.T) {
 	rootSnapshot := unittest.RootSnapshotFixture(participants)
 	rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
-	util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
-		head, err := rootSnapshot.Head()
-		require.NoError(t, err)
+	head, err := rootSnapshot.Head()
+	require.NoError(t, err)

-		extend := unittest.BlockFixture()
-		extend.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
-		extend.Header.Height = 1
-		extend.Header.View = 1
-		extend.Header.ParentID = head.ID()
-		extend.Header.ParentView = head.View
+	// we create the following two descendants of head:
+	//   head <- blockB <- blockC
+	// where blockB and blockC have exactly the same height
+	blockB := unittest.BlockWithParentFixture(head) // creates child with height one larger, but view possibly much larger
+	blockB.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
+	blockB.Header.View = head.Height + 1

-		err = state.Extend(context.Background(), &extend)
-		require.NoError(t, err)
+	blockC := unittest.BlockWithParentFixture(blockB.Header) // creates child with height one larger, but view possibly much larger
+	blockC.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
+	blockC.Header.Height = blockB.Header.Height

-		// create another block with the same height and view, that is coming after
-		extend.Header.ParentID = extend.Header.ID()
-		extend.Header.Height = 1
-		extend.Header.View = 2
+	util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, chainState *protocol.ParticipantState) {
+		require.NoError(t, chainState.Extend(context.Background(), blockB))

-		err = state.Extend(context.Background(), &extend)
+		err = chainState.Extend(context.Background(), blockC)
 		require.Error(t, err)
+		require.True(t, st.IsInvalidExtensionError(err))

-		// verify seal not indexed
+		// Whenever the state ingests a block, it indexes the latest seal as of this
block. + // Therefore, we can use this as a check to confirm that blockB was successfully ingested, + // but the information from blockC was not. + storagedb := badgerimpl.ToDB(db) var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) - require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + // latest seal for blockB should be found, as blockB was successfully ingested: + require.NoError(t, operation.LookupLatestSealAtBlock(storagedb.Reader(), blockB.ID(), &sealID)) + // latest seal for blockC should NOT be found, because extending the state with blockC errored: + require.ErrorIs(t, + operation.LookupLatestSealAtBlock(storagedb.Reader(), blockC.ID(), &sealID), + storage.ErrNotFound) }) } @@ -637,11 +662,13 @@ func TestExtendBlockNotConnected(t *testing.T) { err = state.Extend(context.Background(), extend) require.Error(t, err) + storagedb := badgerimpl.ToDB(db) + // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), extend.ID(), &sealID) require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + require.ErrorIs(t, err, storage.ErrNotFound) }) } @@ -704,7 +731,7 @@ func TestExtendReceiptsInvalid(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - // create block2 and block3 + // create block2 and block3 as descendants of head block2 := unittest.BlockWithParentFixture(head) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) receipt := unittest.ReceiptForBlockFixture(block2) // receipt for block 2 @@ -825,6 +852,7 @@ func TestExtendEpochTransitionValid(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() // set up state and mock ComplianceMetrics object metrics := mockmodule.NewComplianceMetrics(t) @@ -843,7 +871,6 @@ func TestExtendEpochTransitionValid(t *testing.T) { require.NoError(t, err) metrics.On("CurrentEpochCounter", counter).Once() metrics.On("CurrentEpochPhase", initialPhase).Once() - metrics.On("CurrentEpochFinalView", finalView).Once() metrics.On("CurrentDKGPhaseViews", @@ -856,7 +883,8 @@ func TestExtendEpochTransitionValid(t *testing.T) { all := bstorage.InitAll(mmetrics.NewNoopCollector(), db) protoState, err := protocol.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -885,7 +913,7 @@ func TestExtendEpochTransitionValid(t *testing.T) { ) require.NoError(t, err) - mutableProtocolState := protocol_state.NewMutableProtocolState( + mutableState := protocol_state.NewMutableProtocolState( log, all.EpochProtocolStateEntries, all.ProtocolKVStore, @@ -895,12 +923,14 @@ func TestExtendEpochTransitionValid(t *testing.T) { all.Setups, all.EpochCommits, ) - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) head, err := rootSnapshot.Head() require.NoError(t, err) result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) + _, err = state.AtBlockID(head.ID()).Epochs().Current() + require.NoError(t, err) // we should begin the epoch in the staking phase phase, err := state.AtBlockID(head.ID()).EpochPhase() @@ -1144,13 +1174,18 @@ func TestExtendConflictingEpochEvents(t 
*testing.T) { result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. + usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + // add two conflicting blocks for each service event to reference - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndUniqueView(head, usedViews) block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block1) require.NoError(t, err) - block2 := unittest.BlockWithParentFixture(head) + block2 := unittest.BlockWithParentAndUniqueView(head, usedViews) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block2) require.NoError(t, err) @@ -1180,7 +1215,7 @@ func TestExtendConflictingEpochEvents(t *testing.T) { block1Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup1.ServiceEvent()} // add block 1 receipt to block 3 payload - block3 := unittest.BlockWithParentFixture(block1.Header) + block3 := unittest.BlockWithParentAndUniqueView(block1.Header, usedViews) block3.SetPayload(flow.Payload{ Receipts: []*flow.ExecutionReceiptMeta{block1Receipt.Meta()}, Results: []*flow.ExecutionResult{&block1Receipt.ExecutionResult}, @@ -1194,7 +1229,7 @@ func TestExtendConflictingEpochEvents(t *testing.T) { block2Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup2.ServiceEvent()} // add block 2 receipt to block 4 payload - block4 := unittest.BlockWithParentFixture(block2.Header) + block4 := unittest.BlockWithParentAndUniqueView(block2.Header, usedViews) block4.SetPayload(flow.Payload{ Receipts: []*flow.ExecutionReceiptMeta{block2Receipt.Meta()}, Results: []*flow.ExecutionResult{&block2Receipt.ExecutionResult}, @@ -1210,7 +1245,7 @@ func TestExtendConflictingEpochEvents(t *testing.T) { seals2 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))} // block 5 builds on block 3, contains seal for block 1 - block5 := unittest.BlockWithParentFixture(block3.Header) + block5 := unittest.BlockWithParentAndUniqueView(block3.Header, usedViews) block5.SetPayload(flow.Payload{ Seals: seals1, ProtocolStateID: expectedStateIdCalculator(block5.Header, seals1), @@ -1219,7 +1254,7 @@ func TestExtendConflictingEpochEvents(t *testing.T) { require.NoError(t, err) // block 6 builds on block 4, contains seal for block 2 - block6 := unittest.BlockWithParentFixture(block4.Header) + block6 := unittest.BlockWithParentAndUniqueView(block4.Header, usedViews) block6.SetPayload(flow.Payload{ Seals: seals2, ProtocolStateID: expectedStateIdCalculator(block6.Header, seals2), @@ -1228,12 +1263,12 @@ func TestExtendConflictingEpochEvents(t *testing.T) { require.NoError(t, err) // block 7 builds on block 5, contains QC for block 5 - block7 := unittest.BlockWithParentProtocolState(block5) + block7 := unittest.BlockWithParentProtocolStateAndUniqueView(block5, usedViews) err = state.Extend(context.Background(), block7) require.NoError(t, err) // block 8 builds on block 6, contains QC for block 6 - block8 := unittest.BlockWithParentProtocolState(block6) + block8 := unittest.BlockWithParentProtocolStateAndUniqueView(block6, usedViews) err = state.Extend(context.Background(), block8) 
require.NoError(t, err) @@ -1273,13 +1308,18 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. + usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + // add two conflicting blocks for each service event to reference - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndUniqueView(head, usedViews) block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block1) require.NoError(t, err) - block2 := unittest.BlockWithParentFixture(head) + block2 := unittest.BlockWithParentAndUniqueView(head, usedViews) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block2) require.NoError(t, err) @@ -1300,7 +1340,7 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { block1Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup.ServiceEvent()} // add block 1 receipt to block 3 payload - block3 := unittest.BlockWithParentFixture(block1.Header) + block3 := unittest.BlockWithParentAndUniqueView(block1.Header, usedViews) block3.SetPayload(unittest.PayloadFixture( unittest.WithReceipts(block1Receipt), unittest.WithProtocolStateID(rootProtocolStateID), @@ -1313,7 +1353,7 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { block2Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup.ServiceEvent()} // add block 2 receipt to block 4 payload - block4 := unittest.BlockWithParentFixture(block2.Header) + block4 := unittest.BlockWithParentAndUniqueView(block2.Header, usedViews) block4.SetPayload(unittest.PayloadFixture( unittest.WithReceipts(block2Receipt), unittest.WithProtocolStateID(rootProtocolStateID), @@ -1328,7 +1368,7 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { seals2 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))} // block 5 builds on block 3, contains seal for block 1 - block5 := unittest.BlockWithParentFixture(block3.Header) + block5 := unittest.BlockWithParentAndUniqueView(block3.Header, usedViews) block5.SetPayload(flow.Payload{ Seals: seals1, ProtocolStateID: expectedStateIdCalculator(block5.Header, seals1), @@ -1337,7 +1377,7 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { require.NoError(t, err) // block 6 builds on block 4, contains seal for block 2 - block6 := unittest.BlockWithParentFixture(block4.Header) + block6 := unittest.BlockWithParentAndUniqueView(block4.Header, usedViews) block6.SetPayload(flow.Payload{ Seals: seals2, ProtocolStateID: expectedStateIdCalculator(block6.Header, seals2), @@ -1346,13 +1386,13 @@ func TestExtendDuplicateEpochEvents(t *testing.T) { require.NoError(t, err) // block 7 builds on block 5, contains QC for block 5 - block7 := unittest.BlockWithParentProtocolState(block5) + block7 := unittest.BlockWithParentProtocolStateAndUniqueView(block5, usedViews) err = state.Extend(context.Background(), block7) require.NoError(t, err) // block 8 builds on block 6, contains QC for block 6 // at this point we are inserting the duplicate EpochSetup, should not error - block8 := unittest.BlockWithParentProtocolState(block6) + block8 := 
unittest.BlockWithParentProtocolStateAndUniqueView(block6, usedViews) err = state.Extend(context.Background(), block8) require.NoError(t, err) @@ -1376,7 +1416,7 @@ func TestExtendEpochSetupInvalid(t *testing.T) { // setupState initializes the protocol state for a test case // * creates and finalizes a new block for the first seal to reference // * creates a factory method for test cases to generated valid EpochSetup events - setupState := func(t *testing.T, db *badger.DB, state *protocol.ParticipantState) ( + setupState := func(t *testing.T, _ *badger.DB, state *protocol.ParticipantState) ( *flow.Block, func(...func(*flow.EpochSetup)) (*flow.EpochSetup, *flow.ExecutionReceipt, *flow.Seal), ) { @@ -1532,7 +1572,7 @@ func TestExtendEpochCommitInvalid(t *testing.T) { // swap consensus node for a new one for epoch 2 epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) epoch2Participants := append( - participants.Filter(filter.Not[flow.Identity](filter.HasRole[flow.Identity](flow.RoleConsensus))), + participants.Filter(filter.Not(filter.HasRole[flow.Identity](flow.RoleConsensus))), epoch2NewParticipant, ).Sort(flow.Canonical[flow.Identity]).ToSkeleton() @@ -1707,7 +1747,7 @@ func TestEpochFallbackMode(t *testing.T) { protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) result, _, err := rootSnapshot.SealedResult() @@ -1718,7 +1758,7 @@ func TestEpochFallbackMode(t *testing.T) { safetyThreshold := rootProtocolState.GetFinalizationSafetyThreshold() require.GreaterOrEqual(t, epochExtensionViewCount, safetyThreshold, "epoch extension view count must be at least as large as safety threshold") - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) epoch1Setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) epoch1FinalView := epoch1Setup.FinalView @@ -2002,13 +2042,13 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) rootResult, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) // add a block for the first seal to reference block1 := unittest.BlockWithParentFixture(head) @@ -2030,7 +2070,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal := 
unittest.ReceiptAndSealForBlock(block1, invalidSetup.ServiceEvent()) // ingesting block 2 and 3, block 3 seals the invalid setup event - block2, block3 := unittest.SealBlock(t, state, mutableProtocolState, block1, receipt, seal) + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block2.ID()), false) // EFM shouldn't be triggered since block 2 only incorporates the event, sealing happens in block 3 assertEpochFallbackTriggered(t, state.AtBlockID(block3.ID()), true) // EFM has to be triggered at block 3, since it seals the invalid setup event assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal @@ -2060,7 +2100,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block2, epochRecover.ServiceEvent()) // ingesting block 4 and 5, block 5 seals the EpochRecover event - block4, block5 := unittest.SealBlock(t, state, mutableProtocolState, block3, receipt, seal) + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block4.ID()), true) assertEpochFallbackTriggered(t, state.AtBlockID(block5.ID()), false) assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal @@ -2095,13 +2135,13 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) rootResult, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) // add a block for the first seal to reference block1 := unittest.BlockWithParentFixture(head) @@ -2124,7 +2164,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) // ingesting block 2 and 3, block 3 seals the EpochSetup event - block2, block3 := unittest.SealBlock(t, state, mutableProtocolState, block1, receipt, seal) + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) @@ -2141,7 +2181,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block2, invalidEpochCommit.ServiceEvent()) // ingesting block 4 and 5, block 5 seals the invalid commit event - block4, block5 := unittest.SealBlock(t, state, mutableProtocolState, block3, receipt, seal) + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block4.ID()), false) // EFM shouldn't be triggered since block 4 only incorporates the event, sealing 
happens in block 5 assertEpochFallbackTriggered(t, state.AtBlockID(block5.ID()), true) // EFM has to be triggered at block 5, since it seals the invalid commit event assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal @@ -2170,7 +2210,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block3, epochRecover.ServiceEvent()) // ingesting block 6 and 7, block 7 seals the `epochRecover` event - block6, block7 := unittest.SealBlock(t, state, mutableProtocolState, block5, receipt, seal) + block6, block7 := unittest.SealBlock(t, state, mutableState, block5, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block6.ID()), true) assertEpochFallbackTriggered(t, state.AtBlockID(block7.ID()), false) assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal @@ -2193,7 +2233,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { }) }) - // Entering EFM in the commit phase is the most complex case since we can't revert an already committed epoch. In this case,x + // Entering EFM in the commit phase is the most complex case since we can't revert an already committed epoch. In this case, // we proceed as follows: // - We build valid EpochSetup and EpochCommit events for the next epoch, effectively moving the protocol to the EpochCommit phase. // - Next, we incorporate an invalid EpochCommit event, which will trigger EFM. @@ -2221,7 +2261,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) rootResult, _, err := rootSnapshot.SealedResult() @@ -2232,7 +2272,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { safetyThreshold := rootProtocolState.GetFinalizationSafetyThreshold() require.GreaterOrEqual(t, epochExtensionViewCount, safetyThreshold, "epoch extension view count must be at least as large as safety threshold") - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) // Constructing blocks // ... <- B1 <- B2(ER(B1, EpochSetup)) <- B3(S(ER(B1))) <- B4(ER(B2, EpochCommit)) <- B5(S(ER(B2))) <- ... 
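The epoch-fallback recovery scenarios above all rely on the same two-step fixture pattern: a (possibly invalid) epoch service event is carried by an execution receipt that one block incorporates, and it only affects the protocol state once a later block seals that receipt. A condensed, illustrative sketch of that pattern follows; the variable names `blockN`, `serviceEvent`, `incorporating`, and `sealing` are placeholders, not identifiers from the tests.

```go
// Build a receipt for `blockN` whose execution result carries `serviceEvent`, plus the matching seal.
receipt, seal := unittest.ReceiptAndSealForBlock(blockN, serviceEvent)

// SealBlock extends the state with two blocks on top of the given parent block:
// the first incorporates the receipt, the second seals it.
incorporating, sealing := unittest.SealBlock(t, state, mutableState, blockN, receipt, seal)

// Epoch fallback (EFM) is evaluated per fork: it is not triggered at the block that merely
// incorporates an invalid event, only at the block that seals it, and it is reflected in the
// finalized state only once that sealing block is finalized.
assertEpochFallbackTriggered(t, state.AtBlockID(incorporating.ID()), false)
assertEpochFallbackTriggered(t, state.AtBlockID(sealing.ID()), true)
```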
@@ -2262,7 +2302,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) // ingesting block 2 and 3, block 3 seals the `epochSetup` for the next epoch - block2, block3 := unittest.SealBlock(t, state, mutableProtocolState, block1, receipt, seal) + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) @@ -2282,7 +2322,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block2, epoch2Commit.ServiceEvent()) // ingesting block 4 and 5, block 5 seals the `epochCommit` for the next epoch - block4, block5 := unittest.SealBlock(t, state, mutableProtocolState, block3, receipt, seal) + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) err = state.Finalize(context.Background(), block4.ID()) require.NoError(t, err) @@ -2307,7 +2347,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block3, invalidCommit.ServiceEvent()) // seal B3 by building two blocks on top of B5 that contain ER and seal respectively - block6, block7 := unittest.SealBlock(t, state, mutableProtocolState, block5, receipt, seal) + block6, block7 := unittest.SealBlock(t, state, mutableState, block5, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block6.ID()), false) // EFM shouldn't be triggered since block 6 only incorporates the event, sealing happens in block 7 assertEpochFallbackTriggered(t, state.AtBlockID(block7.ID()), true) // EFM has to be triggered at block 7, since it seals the invalid commit event assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal @@ -2394,7 +2434,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { receipt, seal = unittest.ReceiptAndSealForBlock(block4, epochRecover.ServiceEvent()) // ingesting block 11 and 12, block 12 seals the `epochRecover` event - block11, block12 := unittest.SealBlock(t, state, mutableProtocolState, block10, receipt, seal) + block11, block12 := unittest.SealBlock(t, state, mutableState, block10, receipt, seal) assertEpochFallbackTriggered(t, state.AtBlockID(block11.ID()), true) assertEpochFallbackTriggered(t, state.AtBlockID(block12.ID()), false) assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal @@ -2457,7 +2497,7 @@ func TestRecoveryFromEpochFallbackMode(t *testing.T) { // We assume we keep the same view duration for all views in the epoch and all extensions. 
func TestEpochTargetEndTime(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) rootResult, _, err := rootSnapshot.SealedResult() @@ -2469,7 +2509,7 @@ func TestEpochTargetEndTime(t *testing.T) { rootTargetEndTime := currentEpoch.TargetEndTime() require.Equal(t, epoch1Setup.TargetEndTime, rootTargetEndTime) - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) // add a block that will trigger EFM and add an epoch extension since the view of the epoch exceeds the safety threshold block1 := unittest.BlockWithParentFixture(head) @@ -2515,7 +2555,7 @@ func TestEpochTargetEndTime(t *testing.T) { // We assume we keep the same view duration for all views in the epoch and all extensions. func TestEpochTargetDuration(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState, mutableProtocolState realprotocol.MutableProtocolState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) rootResult, _, err := rootSnapshot.SealedResult() @@ -2527,7 +2567,7 @@ func TestEpochTargetDuration(t *testing.T) { rootTargetDuration := currentEpoch.TargetDuration() require.Equal(t, epoch1Setup.TargetDuration, rootTargetDuration) - expectedStateIdCalculator := calculateExpectedStateId(t, mutableProtocolState) + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) // add a block that will trigger EFM and add an epoch extension since the view of the epoch exceeds the safety threshold block1 := unittest.BlockWithParentFixture(head) @@ -2567,6 +2607,7 @@ func TestEpochTargetDuration(t *testing.T) { func TestExtendInvalidSealsInBlock(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -2583,7 +2624,8 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { state, err := protocol.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -2694,11 +2736,13 @@ func TestHeaderExtendMissingParent(t *testing.T) { require.Error(t, err) require.False(t, st.IsInvalidExtensionError(err), err) + storagedb := badgerimpl.ToDB(db) + // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), extend.ID(), &sealID) require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + require.ErrorIs(t, err, storage.ErrNotFound) }) } @@ -2719,16 +2763,18 @@ func TestHeaderExtendHeightTooSmall(t *testing.T) { block2 := unittest.BlockWithParentFixture(block1.Header) block2.Header.Height = block1.Header.Height - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + 
err = state.ExtendCertified(context.Background(), block1, block2.Header.ParentQC()) require.NoError(t, err) err = state.ExtendCertified(context.Background(), block2, unittest.CertifyBlock(block2.Header)) require.False(t, st.IsInvalidExtensionError(err)) + storagedb := badgerimpl.ToDB(db) + // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) - require.ErrorIs(t, err, stoerr.ErrNotFound) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), block2.ID(), &sealID) + require.ErrorIs(t, err, storage.ErrNotFound) }) } @@ -2763,8 +2809,8 @@ func TestExtendBlockProcessable(t *testing.T) { grandChild := unittest.BlockWithParentProtocolState(child) // extend block using certifying QC, expect that BlockProcessable will be emitted once - consumer.On("BlockProcessable", block.Header, child.Header.QuorumCertificate()).Once() - err := state.ExtendCertified(context.Background(), block, child.Header.QuorumCertificate()) + consumer.On("BlockProcessable", block.Header, child.Header.ParentQC()).Once() + err = state.ExtendCertified(context.Background(), block, child.Header.ParentQC()) require.NoError(t, err) // extend block without certifying QC, expect that BlockProcessable won't be called @@ -2775,7 +2821,7 @@ func TestExtendBlockProcessable(t *testing.T) { // extend block using certifying QC, expect that BlockProcessable will be emitted twice. // One for parent block and second for current block. grandChildCertifyingQC := unittest.CertifyBlock(grandChild.Header) - consumer.On("BlockProcessable", child.Header, grandChild.Header.QuorumCertificate()).Once() + consumer.On("BlockProcessable", child.Header, grandChild.Header.ParentQC()).Once() consumer.On("BlockProcessable", grandChild.Header, grandChildCertifyingQC).Once() err = state.ExtendCertified(context.Background(), grandChild, grandChildCertifyingQC) require.NoError(t, err) @@ -2794,7 +2840,12 @@ func TestFollowerHeaderExtendBlockNotConnected(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := unittest.BlockWithParentFixture(head) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
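+	// (head's own view is marked as used before any child is created, so the two fork blocks built
+	// below are guaranteed to receive views that differ from head's view and from each other)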
+ usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + block1 := unittest.BlockWithParentAndUniqueView(head, usedViews) block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.ExtendCertified(context.Background(), block1, unittest.CertifyBlock(block1.Header)) require.NoError(t, err) @@ -2803,14 +2854,16 @@ func TestFollowerHeaderExtendBlockNotConnected(t *testing.T) { require.NoError(t, err) // create a fork at view/height 1 and try to connect it to root - block2 := unittest.BlockWithParentFixture(head) + block2 := unittest.BlockWithParentAndUniqueView(head, usedViews) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.ExtendCertified(context.Background(), block2, unittest.CertifyBlock(block2.Header)) require.NoError(t, err) + storagedb := badgerimpl.ToDB(db) + // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), block2.ID(), &sealID) require.NoError(t, err) }) } @@ -2827,7 +2880,12 @@ func TestParticipantHeaderExtendBlockNotConnected(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := unittest.BlockWithParentFixture(head) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. + usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + block1 := unittest.BlockWithParentAndUniqueView(head, usedViews) block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block1) require.NoError(t, err) @@ -2836,15 +2894,17 @@ func TestParticipantHeaderExtendBlockNotConnected(t *testing.T) { require.NoError(t, err) // create a fork at view/height 1 and try to connect it to root - block2 := unittest.BlockWithParentFixture(head) + block2 := unittest.BlockWithParentAndUniqueView(head, usedViews) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) err = state.Extend(context.Background(), block2) require.True(t, st.IsOutdatedExtensionError(err), err) + storagedb := badgerimpl.ToDB(db) + // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) - require.ErrorIs(t, err, stoerr.ErrNotFound) + err = operation.LookupLatestSealAtBlock(storagedb.Reader(), block2.ID(), &sealID) + require.ErrorIs(t, err, storage.ErrNotFound) }) } @@ -2859,8 +2919,7 @@ func TestHeaderExtendHighestSeal(t *testing.T) { block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) block3 := unittest.BlockWithParentProtocolState(block2) - - err := state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block2, block3.Header.ParentQC()) require.NoError(t, err) // create receipts and seals for block2 and block3 @@ -2884,10 +2943,10 @@ func TestHeaderExtendHighestSeal(t *testing.T) { unittest.WithProtocolStateID(rootProtocolStateID), )) - err = state.ExtendCertified(context.Background(), block3, block4.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block3, block4.Header.ParentQC()) require.NoError(t, err) - err = 
state.ExtendCertified(context.Background(), block4, block5.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block4, block5.Header.ParentQC()) require.NoError(t, err) err = state.ExtendCertified(context.Background(), block5, unittest.CertifyBlock(block5.Header)) @@ -2944,7 +3003,12 @@ func TestExtendInvalidGuarantee(t *testing.T) { validSignerIndices, err := signature.EncodeSignersToIndices(all, all) require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. + usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + block := unittest.BlockWithParentAndUniqueView(head, usedViews) payload := flow.Payload{ Guarantees: []*flow.CollectionGuarantee{ { @@ -2967,7 +3031,7 @@ func TestExtendInvalidGuarantee(t *testing.T) { payload.Guarantees[0].SignerIndices = []byte{byte(1)} // create new block that has invalid collection guarantee - block = unittest.BlockWithParentFixture(head) + block = unittest.BlockWithParentAndUniqueView(head, usedViews) block.SetPayload(payload) err = state.Extend(context.Background(), block) @@ -3055,7 +3119,7 @@ func TestSealed(t *testing.T) { unittest.WithProtocolStateID(rootProtocolStateID), )) - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block1, block2.Header.ParentQC()) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) @@ -3067,7 +3131,7 @@ func TestSealed(t *testing.T) { ProtocolStateID: rootProtocolStateID, }) - err = state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block2, block3.Header.ParentQC()) require.NoError(t, err) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) @@ -3103,9 +3167,9 @@ func TestCacheAtomicity(t *testing.T) { var wg sync.WaitGroup wg.Add(1) go func(blockID flow.Identifier) { - for i := 0; i < 100; i++ { + for range 100 { _, err := headers.ByBlockID(blockID) - if errors.Is(err, stoerr.ErrNotFound) { + if errors.Is(err, storage.ErrNotFound) { continue } require.NoError(t, err) @@ -3127,6 +3191,7 @@ func TestCacheAtomicity(t *testing.T) { // TestHeaderInvalidTimestamp tests that extending header with invalid timestamp results in sentinel error func TestHeaderInvalidTimestamp(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -3144,7 +3209,8 @@ func TestHeaderInvalidTimestamp(t *testing.T) { state, err := protocol.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -3258,9 +3324,9 @@ func getRootProtocolStateID(t *testing.T, rootSnapshot *inmem.Snapshot) flow.Ide } // calculateExpectedStateId is a utility function which makes easier to get expected protocol state ID after applying service events contained in seals. 
-func calculateExpectedStateId(t *testing.T, mutableProtocolState realprotocol.MutableProtocolState) func(header *flow.Header, seals []*flow.Seal) flow.Identifier { +func calculateExpectedStateId(t *testing.T, mutableState realprotocol.MutableProtocolState) func(header *flow.Header, seals []*flow.Seal) flow.Identifier { return func(header *flow.Header, seals []*flow.Seal) flow.Identifier { - expectedStateID, _, err := mutableProtocolState.EvolveState(header.ParentID, header.View, seals) + expectedStateID, err := mutableState.EvolveState(deferred.NewDeferredBlockPersist(), header.ParentID, header.View, seals) require.NoError(t, err) return expectedStateID } diff --git a/state/protocol/badger/params.go b/state/protocol/badger/params.go deleted file mode 100644 index 9da21111ade..00000000000 --- a/state/protocol/badger/params.go +++ /dev/null @@ -1,173 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type Params struct { - protocol.GlobalParams - protocol.InstanceParams -} - -var _ protocol.Params = (*Params)(nil) - -// InstanceParams implements the interface protocol.InstanceParams. All functions -// are served on demand directly from the database, _without_ any caching. -type InstanceParams struct { - db *badger.DB - // finalizedRoot marks the cutoff of the history this node knows about. It is the block at the tip - // of the root snapshot used to bootstrap this node - all newer blocks are synced from the network. - finalizedRoot *flow.Header - // sealedRoot is the latest sealed block with respect to `finalizedRoot`. - sealedRoot *flow.Header - // rootSeal is the seal for block `sealedRoot` - the newest incorporated seal with respect to `finalizedRoot`. - rootSeal *flow.Seal -} - -var _ protocol.InstanceParams = (*InstanceParams)(nil) - -// ReadInstanceParams reads the instance parameters from the database and returns them as in-memory representation. -// No errors are expected during normal operation. -func ReadInstanceParams(db *badger.DB, headers storage.Headers, seals storage.Seals) (*InstanceParams, error) { - params := &InstanceParams{ - db: db, - } - - // in next section we will read data from the database and cache them, - // as they are immutable for the runtime of the node. 
- err := db.View(func(txn *badger.Txn) error { - var ( - finalizedRootHeight uint64 - sealedRootHeight uint64 - ) - - // root height - err := db.View(operation.RetrieveRootHeight(&finalizedRootHeight)) - if err != nil { - return fmt.Errorf("could not read root block to populate cache: %w", err) - } - // sealed root height - err = db.View(operation.RetrieveSealedRootHeight(&sealedRootHeight)) - if err != nil { - return fmt.Errorf("could not read sealed root block to populate cache: %w", err) - } - - // look up 'finalized root block' - var finalizedRootID flow.Identifier - err = db.View(operation.LookupBlockHeight(finalizedRootHeight, &finalizedRootID)) - if err != nil { - return fmt.Errorf("could not look up finalized root height: %w", err) - } - params.finalizedRoot, err = headers.ByBlockID(finalizedRootID) - if err != nil { - return fmt.Errorf("could not retrieve finalized root header: %w", err) - } - - // look up the sealed block as of the 'finalized root block' - var sealedRootID flow.Identifier - err = db.View(operation.LookupBlockHeight(sealedRootHeight, &sealedRootID)) - if err != nil { - return fmt.Errorf("could not look up sealed root height: %w", err) - } - params.sealedRoot, err = headers.ByBlockID(sealedRootID) - if err != nil { - return fmt.Errorf("could not retrieve sealed root header: %w", err) - } - - // retrieve the root seal - params.rootSeal, err = seals.HighestInFork(finalizedRootID) - if err != nil { - return fmt.Errorf("could not retrieve root seal: %w", err) - } - - return nil - }) - if err != nil { - return nil, fmt.Errorf("could not read InstanceParams data to populate cache: %w", err) - } - - return params, nil -} - -// FinalizedRoot returns the finalized root header of the current protocol state. This will be -// the head of the protocol state snapshot used to bootstrap this state and -// may differ from node to node for the same protocol state. -func (p *InstanceParams) FinalizedRoot() *flow.Header { - return p.finalizedRoot -} - -// SealedRoot returns the sealed root block. If it's different from FinalizedRoot() block, -// it means the node is bootstrapped from mid-spork. -func (p *InstanceParams) SealedRoot() *flow.Header { - return p.sealedRoot -} - -// Seal returns the root block seal of the current protocol state. This is the seal for the -// `SealedRoot` block that was used to bootstrap this state. It may differ from node to node. -func (p *InstanceParams) Seal() *flow.Seal { - return p.rootSeal -} - -// ReadGlobalParams reads the global parameters from the database and returns them as in-memory representation. -// No errors are expected during normal operation. -func ReadGlobalParams(db *badger.DB) (*inmem.Params, error) { - var sporkID flow.Identifier - err := db.View(operation.RetrieveSporkID(&sporkID)) - if err != nil { - return nil, fmt.Errorf("could not get spork id: %w", err) - } - - var sporkRootBlockHeight uint64 - err = db.View(operation.RetrieveSporkRootBlockHeight(&sporkRootBlockHeight)) - if err != nil { - return nil, fmt.Errorf("could not get spork root block height: %w", err) - } - - root, err := ReadFinalizedRoot(db) // retrieve root header - if err != nil { - return nil, fmt.Errorf("could not get root: %w", err) - } - - return inmem.NewParams( - inmem.EncodableParams{ - ChainID: root.ChainID, - SporkID: sporkID, - SporkRootBlockHeight: sporkRootBlockHeight, - }, - ), nil -} - -// ReadFinalizedRoot retrieves the root block's header from the database. -// This information is immutable for the runtime of the software and may be cached. 
-func ReadFinalizedRoot(db *badger.DB) (*flow.Header, error) { - var finalizedRootHeight uint64 - var rootID flow.Identifier - var rootHeader flow.Header - err := db.View(func(tx *badger.Txn) error { - err := operation.RetrieveRootHeight(&finalizedRootHeight)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized root height: %w", err) - } - err = operation.LookupBlockHeight(finalizedRootHeight, &rootID)(tx) // look up root block ID - if err != nil { - return fmt.Errorf("could not retrieve root header's ID by height: %w", err) - } - err = operation.RetrieveHeader(rootID, &rootHeader)(tx) // retrieve root header - if err != nil { - return fmt.Errorf("could not retrieve root header: %w", err) - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("failed to read root information from database: %w", err) - } - return &rootHeader, nil -} diff --git a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 17d286f0ed9..73596997a20 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -15,8 +13,8 @@ import ( "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" ) // Snapshot implements the protocol.Snapshot interface. @@ -125,6 +123,11 @@ func (s *Snapshot) Commit() (flow.StateCommitment, error) { return seal.FinalState, nil } +// SealedResult returns the most recent included seal as of this block and +// the corresponding execution result. The seal may have been included in a +// parent block, if this block is empty. If this block contains multiple +// seals, this returns the seal for the block with the greatest height. +// TODO document error returns func (s *Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { seal, err := s.state.seals.HighestInFork(s.blockID) if err != nil { @@ -152,7 +155,7 @@ func (s *Snapshot) SealingSegment() (*flow.SealingSegment, error) { // enough history to satisfy _all_ of the following conditions: // (i) The highest sealed block as of `head` needs to be included in the sealing segment. // This is relevant if `head` does not contain any seals. - // (ii) All blocks that are sealed by `head`. This is relevant if head` contains _multiple_ seals. + // (ii) All blocks that are sealed by `head`. This is relevant if `head` contains _multiple_ seals. 
// (iii) The sealing segment should contain the history back to (including): // limitHeight := max(blockSealedAtHead.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) // Per convention, we include the blocks for (i) in the `SealingSegment.Blocks`, while the @@ -301,7 +304,7 @@ func (s *Snapshot) Descendants() ([]flow.Identifier, error) { func (s *Snapshot) lookupChildren(blockID flow.Identifier) ([]flow.Identifier, error) { var children flow.IdentifierList - err := s.state.db.View(procedure.LookupBlockChildren(blockID, &children)) + err := procedure.LookupBlockChildren(s.state.db.Reader(), blockID, &children) if err != nil { return nil, fmt.Errorf("could not get children of block %v: %w", blockID, err) } @@ -542,36 +545,32 @@ func (q *EpochQuery) retrieveEpochHeightBounds(epoch uint64) ( isFirstHeightKnown, isLastHeightKnown bool, err error, ) { - err = q.snap.state.db.View(func(tx *badger.Txn) error { - // Retrieve the epoch's first height - err = operation.RetrieveEpochFirstHeight(epoch, &firstHeight)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - isFirstHeightKnown = false // unknown boundary - } else { - return err // unexpected error - } - } else { - isFirstHeightKnown = true // known boundary - } - var subsequentEpochFirstHeight uint64 - err = operation.RetrieveEpochFirstHeight(epoch+1, &subsequentEpochFirstHeight)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - isLastHeightKnown = false // unknown boundary - } else { - return err // unexpected error - } - } else { // known boundary - isLastHeightKnown = true - finalHeight = subsequentEpochFirstHeight - 1 + r := q.snap.state.db.Reader() + // Retrieve the epoch's first height + err = operation.RetrieveEpochFirstHeight(r, epoch, &firstHeight) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + isFirstHeightKnown = false // unknown boundary + } else { + return 0, 0, false, false, err // unexpected error } + } else { + isFirstHeightKnown = true // known boundary + } - return nil - }) + var subsequentEpochFirstHeight uint64 + err = operation.RetrieveEpochFirstHeight(r, epoch+1, &subsequentEpochFirstHeight) if err != nil { - return 0, 0, false, false, err + if errors.Is(err, storage.ErrNotFound) { + isLastHeightKnown = false // unknown boundary + } else { + return 0, 0, false, false, err // unexpected error + } + } else { // known boundary + isLastHeightKnown = true + finalHeight = subsequentEpochFirstHeight - 1 } + return firstHeight, finalHeight, isFirstHeightKnown, isLastHeightKnown, nil } diff --git a/state/protocol/badger/snapshot_test.go b/state/protocol/badger/snapshot_test.go index 849154315bd..d2622a17321 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -146,8 +146,9 @@ func TestSnapshot_Params(t *testing.T) { // TestSnapshot_Descendants builds a sample chain with next structure: // -// A (finalized) <- B <- C <- D <- E <- F -// <- G <- H <- I <- J +// ↙ B ← C ← D ← E ← F +// A (finalized) +// ↖ G ← H ← I ← J // // snapshot.Descendants has to return [B, C, D, E, F, G, H, I, J]. func TestSnapshot_Descendants(t *testing.T) { @@ -156,12 +157,18 @@ func TestSnapshot_Descendants(t *testing.T) { rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) head, err := rootSnapshot.Head() require.NoError(t, err) + + // In this test, we create two conflicting forks. 
To prevent accidentally creating byzantine scenarios, where
+	// multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view.
+	usedViews := make(map[uint64]struct{})
+	usedViews[head.View] = struct{}{}
+
 	util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
 		var expectedBlocks []flow.Identifier
-		for i := 5; i > 3; i-- {
+		for _, forkLength := range []int{5, 4} { // construct two forks with length 5 and 4, respectively
 			parent := head
-			for n := 0; n < i; n++ {
-				block := unittest.BlockWithParentFixture(parent)
+			for n := 0; n < forkLength; n++ {
+				block := unittest.BlockWithParentAndUniqueView(parent, usedViews)
 				block.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
 				err := state.Extend(context.Background(), block)
 				require.NoError(t, err)
@@ -231,10 +238,13 @@ func TestClusters(t *testing.T) {
 	seal.ResultID = result.ID()
 	safetyParams, err := protocol.DefaultEpochSafetyParams(root.Header.ChainID)
 	require.NoError(t, err)
+	minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit)
+	require.NoError(t, err)
 	rootProtocolState, err := kvstore.NewDefaultKVStore(
 		safetyParams.FinalizationSafetyThreshold,
 		safetyParams.EpochExtensionViewCount,
-		inmem.EpochProtocolStateFromServiceEvents(setup, commit).ID())
+		minEpochStateEntry.ID(),
+	)
 	require.NoError(t, err)
 	root.Payload.ProtocolStateID = rootProtocolState.ID()
 	rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, qc)
@@ -366,7 +376,6 @@ func TestSealingSegment(t *testing.T) {
 	// Expected sealing segment: [B1, ..., BN], extra blocks: [ROOT]
 	t.Run("long sealing segment", func(t *testing.T) {
 		util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) {
-
 			// build a block to seal
 			block1 := unittest.BlockWithParentFixture(head)
 			block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
@@ -419,7 +428,6 @@ func TestSealingSegment(t *testing.T) {
 	// Expected sealing segment: [B2, B3, B4], Extra blocks: [ROOT, B1]
 	t.Run("overlapping sealing segment", func(t *testing.T) {
 		util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) {
-
 			block1 := unittest.BlockWithParentFixture(head)
 			block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
 			buildFinalizedBlock(t, state, block1)
@@ -670,8 +678,8 @@ func TestSealingSegment(t *testing.T) {
 			blocks := make([]*flow.Block, 0, flow.DefaultTransactionExpiry+3)
 			parent := root
 			for i := 0; i < flow.DefaultTransactionExpiry+1; i++ {
-				next := unittest.BlockWithParentProtocolState(parent)
-				next.Header.View = next.Header.Height + 1 // set view so we are still in the same epoch
+				next := unittest.BlockWithParentProtocolState(parent) // this creates a child, whose view is incremented by up to 10
+				next.Header.View = parent.Header.View + 1             // set view increment to 1, so we remain still in the same epoch
 				buildFinalizedBlock(t, state, next)
 				blocks = append(blocks, next)
 				parent = next
@@ -753,8 +761,8 @@ func TestSealingSegment(t *testing.T) {
 			// build chain, so it's long enough to not target blocks as inside of flow.DefaultTransactionExpiry window.
parent := block4 for i := 0; i < 1.5*flow.DefaultTransactionExpiry; i++ { - next := unittest.BlockWithParentProtocolState(parent) - next.Header.View = next.Header.Height + 1 // set view so we are still in the same epoch + next := unittest.BlockWithParentProtocolState(parent) // this creates a child, whose view is incremented by up to 10 + next.Header.View = parent.Header.View + 1 // set view increment to 1, so we remain still in the same epoch buildFinalizedBlock(t, state, next) parent = next } @@ -815,7 +823,7 @@ func TestSealingSegment_FailureCases(t *testing.T) { t.Run("sealing segment from block below local state root", func(t *testing.T) { // Step I: constructing bootstrapping snapshot with some short history: // - // ╭───── finalized blocks ─────╮ + // ╭───── finalized blocks ─────╮ // <- b1 <- b2(result(b1)) <- b3(seal(b1)) <- // └── head ──┘ // @@ -864,16 +872,16 @@ func TestSealingSegment_FailureCases(t *testing.T) { // SCENARIO 2a: A pending block is chosen as head; at this height no block has been finalized. t.Run("sealing segment from unfinalized, pending block", func(t *testing.T) { util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - // add _unfinalized_ blocks b1 and b2 to state (block b5 is necessary, so b1 has a QC, which is a consistency requirement for subsequent finality) + // add _unfinalized_ blocks b1 and b2 to state (block b2 is necessary, so b1 has a QC, which is a consistency requirement for subsequent finality) b1 := unittest.BlockWithParentFixture(sporkRoot) b1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) b2 := unittest.BlockWithParentFixture(b1.Header) b2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) - require.NoError(t, state.ExtendCertified(context.Background(), b1, b2.Header.QuorumCertificate())) - require.NoError(t, state.ExtendCertified(context.Background(), b2, unittest.CertifyBlock(b2.Header))) // adding block b5 (providing required QC for b1) + require.NoError(t, state.ExtendCertified(context.Background(), b1, b2.Header.ParentQC())) + require.NoError(t, state.ExtendCertified(context.Background(), b2, unittest.CertifyBlock(b2.Header))) // adding block b2 (providing required QC for b1) // consistency check: there should be no finalized block in the protocol state at height `b1.Height` - _, err := state.AtHeight(b1.Header.Height).Head() // expect statepkg.ErrUnknownSnapshotReference as only finalized blocks are indexed by height + _, err = state.AtHeight(b1.Header.Height).Head() // expect statepkg.ErrUnknownSnapshotReference as only finalized blocks are indexed by height assert.ErrorIs(t, err, statepkg.ErrUnknownSnapshotReference) // requesting a sealing segment from block b1 should fail, as b1 is not yet finalized @@ -884,13 +892,18 @@ func TestSealingSegment_FailureCases(t *testing.T) { // SCENARIO 2b: An orphaned block is chosen as head; at this height a block other than the orphaned has been finalized. t.Run("sealing segment from orphaned block", func(t *testing.T) { + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
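`unittest.BlockWithParentAndUniqueView` is referenced throughout these tests, but its implementation is not part of this diff. A hypothetical sketch of what such a helper could look like, assuming it simply bumps the child's view until it no longer collides with a previously used view:

```go
// Hypothetical sketch -- the real helper lives in the unittest package and is not shown in this diff.
// Builds a child of `parent` whose view has not been used by any previously constructed block.
func BlockWithParentAndUniqueView(parent *flow.Header, usedViews map[uint64]struct{}) *flow.Block {
	block := BlockWithParentFixture(parent) // fixture picks some view larger than the parent's view
	for {
		if _, used := usedViews[block.Header.View]; !used {
			break
		}
		block.Header.View++ // bump the view until it no longer collides with an earlier block
	}
	usedViews[block.Header.View] = struct{}{}
	return block
}
```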
+ usedViews := make(map[uint64]struct{}) + usedViews[sporkRoot.View] = struct{}{} + util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - orphaned := unittest.BlockWithParentFixture(sporkRoot) + orphaned := unittest.BlockWithParentAndUniqueView(sporkRoot, usedViews) orphaned.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) - orphanedChild := unittest.BlockWithParentProtocolState(orphaned) - require.NoError(t, state.ExtendCertified(context.Background(), orphaned, orphanedChild.Header.QuorumCertificate())) + orphanedChild := unittest.BlockWithParentProtocolStateAndUniqueView(orphaned, usedViews) + require.NoError(t, state.ExtendCertified(context.Background(), orphaned, orphanedChild.Header.ParentQC())) require.NoError(t, state.ExtendCertified(context.Background(), orphanedChild, unittest.CertifyBlock(orphanedChild.Header))) - block := unittest.BlockWithParentFixture(sporkRoot) + block := unittest.BlockWithParentAndUniqueView(sporkRoot, usedViews) block.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) buildFinalizedBlock(t, state, block) @@ -904,7 +917,6 @@ func TestSealingSegment_FailureCases(t *testing.T) { assert.True(t, protocol.IsUnfinalizedSealingSegmentError(err)) }) }) - } // TestBootstrapSealingSegmentWithExtraBlocks test sealing segment where the segment blocks contain collection @@ -1044,13 +1056,13 @@ func TestLatestSealedResult(t *testing.T) { unittest.WithProtocolStateID(rootProtocolStateID), )) - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block1, block2.Header.ParentQC()) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block2, block3.Header.ParentQC()) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block3, block4.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block3, block4.Header.ParentQC()) require.NoError(t, err) // B1 <- B2(R1) <- B3(S1) @@ -1062,7 +1074,7 @@ func TestLatestSealedResult(t *testing.T) { assert.Equal(t, block3.Payload.Seals[0], gotSeal) }) - err = state.ExtendCertified(context.Background(), block4, block5.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), block4, block5.Header.ParentQC()) require.NoError(t, err) // B1 <- B2(S1) <- B3(S1) <- B4(R2,R3) diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index ce5e675aa6d..7ae6da2333f 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -5,19 +5,19 @@ import ( "fmt" "sync/atomic" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" statepkg "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/invalid" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/operation" ) // cachedLatest caches both latest 
finalized and sealed block @@ -31,14 +31,15 @@ type cachedLatest struct { } type State struct { - metrics module.ComplianceMetrics - db *badger.DB - headers storage.Headers - blocks storage.Blocks - qcs storage.QuorumCertificates - results storage.ExecutionResults - seals storage.Seals - epoch struct { + metrics module.ComplianceMetrics + db storage.DB + lockManager lockctx.Manager + headers storage.Headers + blocks storage.Blocks + qcs storage.QuorumCertificates + results storage.ExecutionResults + seals storage.Seals + epoch struct { setups storage.EpochSetups commits storage.EpochCommits } @@ -87,9 +88,12 @@ func SkipNetworkAddressValidation(conf *BootstrapConfig) { conf.SkipNetworkAddressValidation = true } +// Bootstrap initializes a the protocol state from the provided root snapshot and persists it to the database. +// No errors expected during normal operation. func Bootstrap( metrics module.ComplianceMetrics, - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -103,6 +107,19 @@ func Bootstrap( root protocol.Snapshot, options ...BootstrapConfigOptions, ) (*State, error) { + // we acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because + // the bootstrapping process inserts and finalizes blocks (all blocks within the + // trusted root snapshot are presumed to be finalized) + lctx := lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertBlock) + if err != nil { + return nil, err + } + err = lctx.AcquireLock(storage.LockFinalizeBlock) + if err != nil { + return nil, err + } config := defaultBootstrapConfig() for _, opt := range options { @@ -117,7 +134,7 @@ func Bootstrap( return nil, fmt.Errorf("expected empty database") } - if err := IsValidRootSnapshot(root, !config.SkipNetworkAddressValidation); err != nil { + if err := datastore.IsValidRootSnapshot(root, !config.SkipNetworkAddressValidation); err != nil { return nil, fmt.Errorf("cannot bootstrap invalid root snapshot: %w", err) } @@ -131,109 +148,90 @@ func Bootstrap( return nil, fmt.Errorf("could not get sealed result for sealing segment: %w", err) } - err = operation.RetryOnConflictTx(db, transaction.Update, func(tx *transaction.Tx) error { - // sealing segment is in ascending height order, so the tail is the - // oldest ancestor and head is the newest child in the segment - // TAIL <- ... <- HEAD - lastFinalized := segment.Finalized() // the highest block in sealing segment is the last finalized block - lastSealed := segment.Sealed() // the lowest block in sealing segment is the last sealed block - - // bootstrap the sealing segment - // creating sealed root block with the rootResult - // creating finalized root block with lastFinalized - err = bootstrapSealingSegment(blocks, qcs, segment, lastFinalized, rootSeal)(tx) - if err != nil { - return fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err) - } + // sealing segment lists blocks in order of ascending height, so the tail + // is the oldest ancestor and head is the newest child in the segment + // TAIL <- ... <- HEAD + // Per definition, the highest block in sealing segment is the last finalized block + // and the lowest block in sealing segment is the last sealed block. 
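For readers skimming the new `Bootstrap` signature: the locking discipline introduced here is that the caller acquires the needed locks once, and the resulting context is then passed as a `lockctx.Proof` into every lock-guarded storage write. A condensed restatement of the calls shown above (no new behavior):

```go
// Condensed restatement of the locking pattern used by Bootstrap above -- no additional behavior.
lctx := lockManager.NewContext()
defer lctx.Release()
if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
	return nil, err
}
if err := lctx.AcquireLock(storage.LockFinalizeBlock); err != nil {
	return nil, err
}
// `lctx` now serves as the lockctx.Proof required by lock-guarded writes, e.g.:
//   qcs.BatchStore(lctx, rw, qc)
//   operation.IndexFinalizedBlockByHeight(lctx, rw, height, blockID)
```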
+	lastFinalized := segment.Finalized()
+
+	// bootstrap the sealing segment
+	// creating sealed root block with the rootResult
+	// creating finalized root block with lastFinalized
+	err = bootstrapSealingSegment(lctx, db, blocks, qcs, segment, lastFinalized, rootSeal)
+	if err != nil {
+		return nil, fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err)
+	}
 
+	err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 		// insert the root quorum certificate into the database
 		qc, err := root.QuorumCertificate()
 		if err != nil {
 			return fmt.Errorf("could not get root qc: %w", err)
 		}
-		err = qcs.StoreTx(qc)(tx)
+		err = qcs.BatchStore(lctx, rw, qc)
 		if err != nil {
 			return fmt.Errorf("could not insert root qc: %w", err)
 		}
 
-		// initialize the current protocol state height/view pointers
-		err = bootstrapStatePointers(root)(tx)
-		if err != nil {
-			return fmt.Errorf("could not bootstrap height/view pointers: %w", err)
-		}
-
 		// initialize spork params
-		err = bootstrapSporkInfo(root)(tx)
+		err = bootstrapSporkInfo(rw, root)
 		if err != nil {
 			return fmt.Errorf("could not bootstrap spork info: %w", err)
 		}
 
 		// bootstrap dynamic protocol state
-		if err != nil {
-			return fmt.Errorf("could not retrieve protocol state for root snapshot: %w", err)
-		}
-		err = bootstrapProtocolState(segment, root.Params(), epochProtocolStateSnapshots, protocolKVStoreSnapshots, setups, commits, !config.SkipNetworkAddressValidation)(tx)
+		err = bootstrapProtocolState(lctx, rw, segment, root.Params(), epochProtocolStateSnapshots, protocolKVStoreSnapshots, setups, commits, !config.SkipNetworkAddressValidation)
 		if err != nil {
 			return fmt.Errorf("could not bootstrap protocol state: %w", err)
 		}
 
 		// initialize version beacon
-		err = boostrapVersionBeacon(root)(tx)
+		err = boostrapVersionBeacon(rw, root)
 		if err != nil {
 			return fmt.Errorf("could not bootstrap version beacon: %w", err)
 		}
-
-		err = updateEpochMetrics(metrics, root)
-		if err != nil {
-			return fmt.Errorf("could not update epoch metrics: %w", err)
-		}
-		metrics.BlockSealed(lastSealed)
-		metrics.SealedHeight(lastSealed.Header.Height)
-		metrics.FinalizedHeight(lastFinalized.Header.Height)
-		for _, block := range segment.Blocks {
-			metrics.BlockFinalized(block)
-		}
-
 		return nil
 	})
 	if err != nil {
 		return nil, fmt.Errorf("bootstrapping failed: %w", err)
 	}
 
-	instanceParams, err := ReadInstanceParams(db, headers, seals)
+	// CAUTION: INSERT FINALIZED HEIGHT must be LAST, because we use its existence in the database
+	// as an indicator that the protocol database has been bootstrapped successfully. Before we write the
+	// final piece of data to complete the bootstrapping, we query the current state of the database
+	// (sanity check) to ensure that it is still considered as not properly bootstrapped.
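The CAUTION note makes the latest finalized height the completion marker for bootstrapping. `IsBootstrapped` itself is not shown in this diff; purely as an illustration, a check along these lines might look as follows (the retrieval helper `operation.RetrieveFinalizedHeight` is assumed here and may not match the actual API):

```go
// Hypothetical sketch -- the real IsBootstrapped is not part of this diff.
// The database counts as bootstrapped iff the latest finalized height has been written.
func IsBootstrapped(db storage.DB) (bool, error) {
	var height uint64
	// operation.RetrieveFinalizedHeight is assumed as the read counterpart to UpsertFinalizedHeight.
	err := operation.RetrieveFinalizedHeight(db.Reader(), &height)
	if errors.Is(err, storage.ErrNotFound) {
		return false, nil // marker missing: bootstrapping has not completed
	}
	if err != nil {
		return false, fmt.Errorf("could not check finalized height: %w", err)
	}
	return true, nil
}
```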
+	isBootstrapped, err = IsBootstrapped(db)
 	if err != nil {
-		return nil, fmt.Errorf("could not read instance params: %w", err)
+		return nil, fmt.Errorf("determining whether the database is successfully bootstrapped failed with an unexpected exception: %w", err)
 	}
-
-	params := &Params{
-		GlobalParams:   root.Params(),
-		InstanceParams: instanceParams,
+	if isBootstrapped { // we haven't written the latest finalized height yet, so this value must be false
+		return nil, fmt.Errorf("sanity check failed: while bootstrapping has not yet completed, the implementation already considers the protocol state as successfully bootstrapped")
+	}
+	err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		// initialize the current protocol state height/view pointers
+		return bootstrapStatePointers(lctx, rw, root)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not bootstrap height/view pointers: %w", err)
 	}
 
-	return newState(
-		metrics,
-		db,
-		headers,
-		seals,
-		results,
-		blocks,
-		qcs,
-		setups,
-		commits,
-		epochProtocolStateSnapshots,
-		protocolKVStoreSnapshots,
-		versionBeacons,
-		params,
-	)
+	state, err := OpenState(metrics, db, lockManager, headers, seals, results, blocks, qcs, setups, commits, epochProtocolStateSnapshots, protocolKVStoreSnapshots, versionBeacons)
+	if err != nil {
+		return nil, fmt.Errorf("bootstrapping failed, because the resulting database state is rejected: %w", err)
+	}
+	return state, nil
 }
 
 // bootstrapProtocolState bootstraps data structures needed for Dynamic Protocol State.
 // The sealing segment may contain blocks committing to different Protocol State entries,
 // in which case each of these protocol state entries are stored in the database during
 // bootstrapping.
 // For each distinct protocol state entry, we also store the associated EpochSetup and
 // EpochCommit service events.
 func bootstrapProtocolState(
+	lctx lockctx.Proof,
+	rw storage.ReaderBatchWriter,
 	segment *flow.SealingSegment,
 	params protocol.GlobalParams,
 	epochProtocolStateSnapshots storage.EpochProtocolStateEntries,
@@ -241,112 +239,184 @@
 	epochSetups storage.EpochSetups,
 	epochCommits storage.EpochCommits,
 	verifyNetworkAddress bool,
-) func(*transaction.Tx) error {
-	return func(tx *transaction.Tx) error {
-		// The sealing segment contains a protocol state entry for every block in the segment, including the root block.
-		for protocolStateID, stateEntry := range segment.ProtocolStateEntries {
-			// Store the protocol KV Store entry
-			err := operation.SkipDuplicatesTx(protocolKVStoreSnapshots.StoreTx(protocolStateID, &stateEntry.KVStore))(tx)
-			if err != nil {
-				return fmt.Errorf("could not store protocol state kvstore: %w", err)
-			}
-
-			// Store the epoch portion of the protocol state, including underlying EpochSetup/EpochCommit service events
-			dynamicEpochProtocolState := inmem.NewEpochProtocolStateAdapter(stateEntry.EpochEntry, params)
-			err = bootstrapEpochForProtocolStateEntry(epochProtocolStateSnapshots, epochSetups, epochCommits, dynamicEpochProtocolState, verifyNetworkAddress)(tx)
-			if err != nil {
-				return fmt.Errorf("could not store epoch service events for state entry (id=%x): %w", stateEntry.EpochEntry.ID(), err)
-			}
+) error {
+	// The sealing segment contains a protocol state entry for every block in the segment, including the root block.
+	for protocolStateID, stateEntry := range segment.ProtocolStateEntries {
+		// Store the protocol KV Store entry
+		err := protocolKVStoreSnapshots.BatchStore(lctx, rw, protocolStateID, &stateEntry.KVStore)
+		if err != nil {
+			return fmt.Errorf("could not store protocol state kvstore: %w", err)
 		}
 
-		for _, block := range segment.AllBlocks() {
-			blockID := block.ID()
-			protocolStateEntryWrapper := segment.ProtocolStateEntries[block.Payload.ProtocolStateID]
-			err := epochProtocolStateSnapshots.Index(blockID, protocolStateEntryWrapper.EpochEntry.ID())(tx)
-			if err != nil {
-				return fmt.Errorf("could not index root protocol state: %w", err)
-			}
-			err = protocolKVStoreSnapshots.IndexTx(blockID, block.Payload.ProtocolStateID)(tx)
-			if err != nil {
-				return fmt.Errorf("could not index root kv store: %w", err)
-			}
+		// Store the epoch portion of the protocol state, including underlying EpochSetup/EpochCommit service events
+		dynamicEpochProtocolState, err := inmem.NewEpochProtocolStateAdapter(
+			inmem.UntrustedEpochProtocolStateAdapter{
+				RichEpochStateEntry: stateEntry.EpochEntry,
+				Params:              params,
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("could not construct epoch protocol state adapter: %w", err)
+		}
+		err = bootstrapEpochForProtocolStateEntry(rw, epochProtocolStateSnapshots, epochSetups, epochCommits, dynamicEpochProtocolState, verifyNetworkAddress)
+		if err != nil {
+			return fmt.Errorf("could not store epoch service events for state entry (id=%x): %w", stateEntry.EpochEntry.ID(), err)
 		}
+	}
 
-		return nil
+	for _, block := range segment.AllBlocks() {
+		blockID := block.ID()
+		protocolStateEntryWrapper := segment.ProtocolStateEntries[block.Payload.ProtocolStateID]
+		err := epochProtocolStateSnapshots.BatchIndex(rw, blockID, protocolStateEntryWrapper.EpochEntry.ID())
+		if err != nil {
+			return fmt.Errorf("could not index root protocol state: %w", err)
+		}
+		err = protocolKVStoreSnapshots.BatchIndex(lctx, rw, blockID, block.Payload.ProtocolStateID)
+		if err != nil {
+			return fmt.Errorf("could not index root kv store: %w", err)
+		}
 	}
+
+	return nil
 }
 
-// bootstrapSealingSegment inserts all blocks and associated metadata for the
-// protocol state root snapshot to disk.
+// bootstrapSealingSegment inserts all blocks and associated metadata for the protocol state root
+// snapshot to disk. We proceed as follows:
+//  1. we persist the auxiliary execution results from the sealing segment
+//  2. persist extra blocks from the sealing segment; these blocks are below the history cut-off and
+//     therefore not fully indexed (we only index the blocks by height).
+//  3. persist sealing segment Blocks and properly populate all indices for those blocks, as if they
+//     had been ingested during normal operations:
+//     - blocks are indexed by their heights
+//     - the latest seal is indexed for each block
+//     - the children of each block are initialized with the set containing the child block
+//  4. For the highest seal (`rootSeal`), we index the sealed result ID in the database.
+//     This is necessary for the execution node to confirm that it is starting to execute from the
+//     correct state.
func bootstrapSealingSegment(
+	lctx lockctx.Proof,
+	db storage.DB,
 	blocks storage.Blocks,
 	qcs storage.QuorumCertificates,
 	segment *flow.SealingSegment,
 	head *flow.Block,
 	rootSeal *flow.Seal,
-) func(*transaction.Tx) error {
-	return func(tx *transaction.Tx) error {
-		txn := tx.DBTxn // tx is just a wrapper around a badger transaction with the additional ability to register callbacks that are executed after the badger transaction completed _successfully_
+) error {
+	// STEP 1: persist AUXILIARY EXECUTION RESULTS (should include the result sealed by segment.FirstSeal if that is not nil)
+	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		w := rw.Writer()
 		for _, result := range segment.ExecutionResults {
-			err := operation.SkipDuplicates(operation.InsertExecutionResult(result))(txn)
+			err := operation.InsertExecutionResult(w, result)
 			if err != nil {
 				return fmt.Errorf("could not insert execution result: %w", err)
 			}
-			err = operation.IndexExecutionResult(result.BlockID, result.ID())(txn)
+			err = operation.IndexExecutionResult(w, result.BlockID, result.ID())
 			if err != nil {
 				return fmt.Errorf("could not index execution result: %w", err)
 			}
 		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
 
-		// insert the first seal (in case the segment's first block contains no seal)
-		if segment.FirstSeal != nil {
-			err := operation.InsertSeal(segment.FirstSeal.ID(), segment.FirstSeal)(txn)
-			if err != nil {
-				return fmt.Errorf("could not insert first seal: %w", err)
-			}
-		}
-
-		// root seal contains the result ID for the sealed root block. If the sealed root block is
-		// different from the finalized root block, then it means the node dynamically bootstrapped.
-		// In that case, we should index the result of the sealed root block so that the EN is able
-		// to execute the next block.
-		err := operation.SkipDuplicates(operation.IndexExecutionResult(rootSeal.BlockID, rootSeal.ResultID))(txn)
-		if err != nil {
-			return fmt.Errorf("could not index root result: %w", err)
-		}
-
-		for _, block := range segment.ExtraBlocks {
+	// STEP 2: persist EXTRA BLOCKS to the database
+	// These blocks are _ancestors_ of `segment.Blocks`, i.e. below the history cut-off. Therefore, we only persist the extra blocks
+	// and index them by height, while all the other indices are omitted, as they would potentially reference non-existent data.
+	//
+	// We PERSIST these blocks ONE-BY-ONE in order of increasing height,
+	// emulating the process during normal operations, for the following reason:
+	// * Execution Receipts are incorporated into blocks for bookkeeping of when and which execution results the ENs published.
+	// * Typically, most ENs commit to the same results. Therefore, Results in blocks are stored separately from the Receipts
+	//   in blocks and deduplicated along the fork -- specifically, we only store the result along a fork in the first block
+	//   containing an execution receipt committing to that result. For receipts committing to the same result in descendant
+	//   blocks, we only store the receipt and omit the result as it is already contained in an ancestor.
+	// * We want to ensure that for every receipt in a block that we store, the result is also going to be available in storage.
+	//   [Blocks.BatchStore] automatically performs this check and errors when attempting to store a block referencing unknown
+	//   results.
+	// * During normal operations, we ingest and persist blocks one by one. However, during bootstrapping we need to store
+	//   multiple blocks.
Hypothetically, if we were to store all blocks in the same batch, results included in ancestor blocks + // would not be persisted in the database yet when attempting to persist their descendants. In other words, the check in + // [Blocks.BatchStore] can't distinguish between a receipt referencing a missing result vs a receipt referencing a result + // that is contained in a previous block being stored as part of the same batch. + for _, block := range segment.ExtraBlocks { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { blockID := block.ID() height := block.Header.Height - err := blocks.StoreTx(block)(tx) + err := blocks.BatchStore(lctx, rw, block) if err != nil { return fmt.Errorf("could not insert SealingSegment extra block: %w", err) } - err = operation.IndexBlockHeight(height, blockID)(txn) + err = operation.IndexFinalizedBlockByHeight(lctx, rw, height, blockID) if err != nil { return fmt.Errorf("could not index SealingSegment extra block (id=%x): %w", blockID, err) } - err = qcs.StoreTx(block.Header.QuorumCertificate())(tx) - if err != nil { - return fmt.Errorf("could not store qc for SealingSegment extra block (id=%x): %w", blockID, err) + + if block.Header.ContainsParentQC() { + err = qcs.BatchStore(lctx, rw, block.Header.ParentQC()) + if err != nil { + return fmt.Errorf("could not store qc for SealingSegment extra block (id=%x): %w", blockID, err) + } + } + + return nil + }) + + if err != nil { + return err + } + } + + // STEP 3: persist sealing segment Blocks and properly populate all indices as if those blocks + // For each block B, we index the highest seal in the fork with head B. To sanity check proper state construction, we want to ensure that the referenced + // seal actually exists in the database at the end of the bootstrapping process. Therefore, we track all the seals that we are storing and error in case + // we attempt to reference a seal that is not in that set. It is fine to omit any seals in `segment.ExtraBlocks` for the following reason: + // * Let's consider the lowest-height block in `segment.Blocks`, by convention `segment.Blocks[0]`, and call it B1. + // * If B1 contains seals, then the latest seal as of B1 is part of the block's payload. S1 will be stored in the database while persisting B1. + // * If and only if B1 contains no seal, then `segment.FirstSeal` is set to the latest seal included in an ancestor of B1 (see [flow.SealingSegment] + // documentation). We explicitly store FirstSeal in the database. + // * By induction, this argument can be applied to all subsequent blocks in `segment.Blocks`. Hence, the index `LatestSealAtBlock` is correctly populated + // for all blocks in `segment.Blocks`. + sealsLookup := make(map[flow.Identifier]struct{}) + sealsLookup[rootSeal.ID()] = struct{}{} + if segment.FirstSeal != nil { // in case the segment's first block contains no seal, insert the first seal + sealsLookup[segment.FirstSeal.ID()] = struct{}{} + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + if segment.FirstSeal != nil { + err := operation.InsertSeal(rw.Writer(), segment.FirstSeal.ID(), segment.FirstSeal) + if err != nil { + return fmt.Errorf("could not insert first seal: %w", err) + } } + return nil + }) + if err != nil { + return err } + } - for i, block := range segment.Blocks { + // PERSIST these blocks ONE-BY-ONE in order of increasing height, emulating the process during normal operations, + // so sanity checks from normal operations should continue to apply. 
+ for i, block := range segment.Blocks { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + w := rw.Writer() blockID := block.ID() height := block.Header.Height - err := blocks.StoreTx(block)(tx) + err := blocks.BatchStore(lctx, rw, block) if err != nil { return fmt.Errorf("could not insert SealingSegment block: %w", err) } - err = operation.IndexBlockHeight(height, blockID)(txn) + err = operation.IndexFinalizedBlockByHeight(lctx, rw, height, blockID) if err != nil { return fmt.Errorf("could not index SealingSegment block (id=%x): %w", blockID, err) } - err = qcs.StoreTx(block.Header.QuorumCertificate())(tx) - if err != nil { - return fmt.Errorf("could not store qc for SealingSegment block (id=%x): %w", blockID, err) + + if block.Header.ContainsParentQC() { + err = qcs.BatchStore(lctx, rw, block.Header.ParentQC()) + if err != nil { + return fmt.Errorf("could not store qc for SealingSegment block (id=%x): %w", blockID, err) + } } // index the latest seal as of this block @@ -354,125 +424,166 @@ func bootstrapSealingSegment( if !ok { return fmt.Errorf("missing latest seal for sealing segment block (id=%s)", blockID) } + + // build seals lookup + for _, seal := range block.Payload.Seals { + sealsLookup[seal.ID()] = struct{}{} + } // sanity check: make sure the seal exists - var latestSeal flow.Seal - err = operation.RetrieveSeal(latestSealID, &latestSeal)(txn) - if err != nil { - return fmt.Errorf("could not verify latest seal for block (id=%x) exists: %w", blockID, err) + _, ok = sealsLookup[latestSealID] + if !ok { + return fmt.Errorf("sanity check fail: missing latest seal for sealing segment block (id=%s)", blockID) } - err = operation.IndexLatestSealAtBlock(blockID, latestSealID)(txn) + err = operation.IndexLatestSealAtBlock(lctx, w, blockID, latestSealID) if err != nil { return fmt.Errorf("could not index block seal: %w", err) } - // for all but the first block in the segment, index the parent->child relationship + // For all but the first block in the segment, index the parent->child relationship: if i > 0 { - err = operation.InsertBlockChildren(block.Header.ParentID, []flow.Identifier{blockID})(txn) + err = operation.UpsertBlockChildren(lctx, w, block.Header.ParentID, []flow.Identifier{blockID}) if err != nil { return fmt.Errorf("could not insert child index for block (id=%x): %w", blockID, err) } } - } + if i == len(segment.Blocks)-1 { // in addition, for the highest block in the sealing segment, the known set of children is empty: + err = operation.UpsertBlockChildren(lctx, rw.Writer(), head.ID(), nil) + if err != nil { + return fmt.Errorf("could not insert child index for head block (id=%x): %w", head.ID(), err) + } + } - // insert an empty child index for the final block in the segment - err = operation.InsertBlockChildren(head.ID(), nil)(txn) + return nil + }) if err != nil { - return fmt.Errorf("could not insert child index for head block (id=%x): %w", head.ID(), err) + return err } - - return nil } -} -// bootstrapStatePointers instantiates special pointers used to by the protocol -// state to keep track of special block heights and views. -func bootstrapStatePointers(root protocol.Snapshot) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - segment, err := root.SealingSegment() + // STEP 4: For the highest seal (`rootSeal`), we index the sealed result ID in the database. 
+ err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // sanity check existence of referenced execution result (should have been stored in STEP 1) + var result flow.ExecutionResult + err := operation.RetrieveExecutionResult(rw.GlobalReader(), rootSeal.ResultID, &result) if err != nil { - return fmt.Errorf("could not get sealing segment: %w", err) + return fmt.Errorf("missing sealed execution result %v: %w", rootSeal.ResultID, err) } - highest := segment.Finalized() - lowest := segment.Sealed() - // find the finalized seal that seals the lowest block, meaning seal.BlockID == lowest.ID() - seal, err := segment.FinalizedSeal() + + // If the sealed root block is different from the finalized root block, then it means the node dynamically + // bootstrapped. In that case, we index the result of the latest sealed result, so that the EN is able + // to confirm that it is loading the correct state to execute the next block. + err = operation.IndexExecutionResult(rw.Writer(), rootSeal.BlockID, rootSeal.ResultID) if err != nil { - return fmt.Errorf("could not get finalized seal from sealing segment: %w", err) + return fmt.Errorf("could not index root result: %w", err) } - safetyData := &hotstuff.SafetyData{ - LockedOneChainView: highest.Header.View, - HighestAcknowledgedView: highest.Header.View, - } + return nil + }) + if err != nil { + return err + } - // Per convention, all blocks in the sealing segment must be finalized. Therefore, a QC must - // exist for the `highest` block in the sealing segment. The QC for `highest` should be - // contained in the `root` Snapshot and returned by `root.QuorumCertificate()`. Otherwise, - // the Snapshot is incomplete, because consensus nodes require this QC. To reduce the chance of - // accidental misconfiguration undermining consensus liveness, we do the following sanity checks: - // * `rootQC` should not be nil - // * `rootQC` should be for `highest` block, i.e. 
its view and blockID should match - rootQC, err := root.QuorumCertificate() - if err != nil { - return fmt.Errorf("could not get root QC: %w", err) - } - if rootQC == nil { - return fmt.Errorf("QC for highest (finalized) block in sealing segment cannot be nil") - } - if rootQC.View != highest.Header.View { - return fmt.Errorf("root QC's view %d does not match the highest block in sealing segment (view %d)", rootQC.View, highest.Header.View) - } - if rootQC.BlockID != highest.Header.ID() { - return fmt.Errorf("root QC is for block %v, which does not match the highest block %v in sealing segment", rootQC.BlockID, highest.Header.ID()) - } + return nil +} - livenessData := &hotstuff.LivenessData{ - CurrentView: highest.Header.View + 1, - NewestQC: rootQC, - } +// bootstrapStatePointers instantiates central pointers used to by the protocol +// state for keeping track of lifecycle variables: +// - Consensus Safety and Liveness Data (only used by consensus participants) +// - Root Block's Height (heighest block in sealing segment) +// - Sealed Root Block Height (block height sealed as of the Root Block) +// - Latest Finalized Height (initialized to height of Root Block) +// - Latest Sealed Block Height (initialized to block height sealed as of the Root Block) +// - initial entry in map: +// Finalized Block ID -> ID of latest seal in fork with this block as head +func bootstrapStatePointers(lctx lockctx.Proof, rw storage.ReaderBatchWriter, root protocol.Snapshot) error { + // sealing segment lists blocks in order of ascending height, so the tail + // is the oldest ancestor and head is the newest child in the segment + // TAIL <- ... <- HEAD + segment, err := root.SealingSegment() + if err != nil { + return fmt.Errorf("could not get sealing segment: %w", err) + } + highest := segment.Finalized() // the highest block in sealing segment is the last finalized block + lowest := segment.Sealed() // the lowest block in sealing segment is the last sealed block - bdtx := tx.DBTxn // tx is just a wrapper around a badger transaction with the additional ability to register callbacks that are executed after the badger transaction completed _successfully_ - // insert initial views for HotStuff - err = operation.InsertSafetyData(highest.Header.ChainID, safetyData)(bdtx) - if err != nil { - return fmt.Errorf("could not insert safety data: %w", err) - } - err = operation.InsertLivenessData(highest.Header.ChainID, livenessData)(bdtx) - if err != nil { - return fmt.Errorf("could not insert liveness data: %w", err) - } + // find the finalized seal that seals the lowest block, meaning seal.BlockID == lowest.ID() + seal, err := segment.FinalizedSeal() + if err != nil { + return fmt.Errorf("could not get finalized seal from sealing segment: %w", err) + } - // insert height pointers - err = operation.InsertRootHeight(highest.Header.Height)(bdtx) - if err != nil { - return fmt.Errorf("could not insert finalized root height: %w", err) - } - // the sealed root height is the lowest block in sealing segment - err = operation.InsertSealedRootHeight(lowest.Header.Height)(bdtx) - if err != nil { - return fmt.Errorf("could not insert sealed root height: %w", err) - } - err = operation.InsertFinalizedHeight(highest.Header.Height)(bdtx) - if err != nil { - return fmt.Errorf("could not insert finalized height: %w", err) - } - err = operation.InsertSealedHeight(lowest.Header.Height)(bdtx) - if err != nil { - return fmt.Errorf("could not insert sealed height: %w", err) - } - err = operation.IndexFinalizedSealByBlockID(seal.BlockID, 
seal.ID())(bdtx) - if err != nil { - return fmt.Errorf("could not index sealed block: %w", err) - } + safetyData := &hotstuff.SafetyData{ + LockedOneChainView: highest.Header.View, + HighestAcknowledgedView: highest.Header.View, + } - // insert first-height indices for epochs which begin within the sealing segment - err = indexEpochHeights(segment)(bdtx) - if err != nil { - return fmt.Errorf("could not index epoch heights: %w", err) - } + // Per convention, all blocks in the sealing segment must be finalized. Therefore, a QC must + // exist for the `highest` block in the sealing segment. The QC for `highest` should be + // contained in the `root` Snapshot and returned by `root.QuorumCertificate()`. Otherwise, + // the Snapshot is incomplete, because consensus nodes require this QC. To reduce the chance of + // accidental misconfiguration undermining consensus liveness, we do the following sanity checks: + // * `rootQC` should not be nil + // * `rootQC` should be for `highest` block, i.e. its view and blockID should match + rootQC, err := root.QuorumCertificate() + if err != nil { + return fmt.Errorf("could not get root QC: %w", err) + } + if rootQC == nil { + return fmt.Errorf("QC for highest (finalized) block in sealing segment cannot be nil") + } + if rootQC.View != highest.Header.View { + return fmt.Errorf("root QC's view %d does not match the highest block in sealing segment (view %d)", rootQC.View, highest.Header.View) + } + if rootQC.BlockID != highest.Header.ID() { + return fmt.Errorf("root QC is for block %v, which does not match the highest block %v in sealing segment", rootQC.BlockID, highest.Header.ID()) + } - return nil + livenessData := &hotstuff.LivenessData{ + CurrentView: highest.Header.View + 1, + NewestQC: rootQC, + } + + w := rw.Writer() + // insert initial views for HotStuff + err = operation.UpsertSafetyData(w, highest.Header.ChainID, safetyData) + if err != nil { + return fmt.Errorf("could not insert safety data: %w", err) + } + err = operation.UpsertLivenessData(w, highest.Header.ChainID, livenessData) + if err != nil { + return fmt.Errorf("could not insert liveness data: %w", err) + } + + // insert height pointers + err = operation.InsertRootHeight(w, highest.Header.Height) + if err != nil { + return fmt.Errorf("could not insert finalized root height: %w", err) + } + // the sealed root height is the lowest block in sealing segment + err = operation.InsertSealedRootHeight(w, lowest.Header.Height) + if err != nil { + return fmt.Errorf("could not insert sealed root height: %w", err) + } + err = operation.UpsertFinalizedHeight(lctx, w, highest.Header.Height) + if err != nil { + return fmt.Errorf("could not insert finalized height: %w", err) + } + err = operation.UpsertSealedHeight(lctx, w, lowest.Header.Height) + if err != nil { + return fmt.Errorf("could not insert sealed height: %w", err) + } + err = operation.IndexFinalizedSealByBlockID(w, seal.BlockID, seal.ID()) + if err != nil { + return fmt.Errorf("could not index sealed block: %w", err) } + + // insert first-height indices for epochs which begin within the sealing segment + err = indexEpochHeights(lctx, rw, segment) + if err != nil { + return fmt.Errorf("could not index epoch heights: %w", err) + } + + return nil } // bootstrapEpochForProtocolStateEntry bootstraps the protocol state database with epoch @@ -480,40 +591,40 @@ func bootstrapStatePointers(root protocol.Snapshot) func(*transaction.Tx) error // a particular Dynamic Protocol State entry. 
// There may be several such entries within a single root snapshot, in which case this // function is called once for each entry. Entries may overlap in which underlying -// epoch information (service events) they reference, which case duplicate writes of -// the same data are ignored. +// epoch information (service events) they reference -- this only has a minor performance +// cost, as duplicate writes of the same data are idempotent. func bootstrapEpochForProtocolStateEntry( + rw storage.ReaderBatchWriter, epochProtocolStateSnapshots storage.EpochProtocolStateEntries, epochSetups storage.EpochSetups, epochCommits storage.EpochCommits, epochProtocolStateEntry protocol.EpochProtocolState, verifyNetworkAddress bool, -) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - richEntry := epochProtocolStateEntry.Entry() - - // keep track of EpochSetup/EpochCommit service events, then store them after this step is complete - var setups []*flow.EpochSetup - var commits []*flow.EpochCommit - - // validate and insert previous epoch if it exists - if epochProtocolStateEntry.PreviousEpochExists() { - // if there is a previous epoch, both setup and commit events must exist - setup := richEntry.PreviousEpochSetup - commit := richEntry.PreviousEpochCommit - - if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { - return fmt.Errorf("invalid EpochSetup for previous epoch: %w", err) - } - if err := protocol.IsValidEpochCommit(commit, setup); err != nil { - return fmt.Errorf("invalid EpochCommit for previous epoch: %w", err) - } +) error { + richEntry := epochProtocolStateEntry.Entry() - setups = append(setups, setup) - commits = append(commits, commit) + // keep track of EpochSetup/EpochCommit service events, then store them after this step is complete + var setups []*flow.EpochSetup + var commits []*flow.EpochCommit + + // validate and insert previous epoch if it exists + if epochProtocolStateEntry.PreviousEpochExists() { + // if there is a previous epoch, both setup and commit events must exist + setup := richEntry.PreviousEpochSetup + commit := richEntry.PreviousEpochCommit + + if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { + return fmt.Errorf("invalid EpochSetup for previous epoch: %w", err) + } + if err := protocol.IsValidEpochCommit(commit, setup); err != nil { + return fmt.Errorf("invalid EpochCommit for previous epoch: %w", err) } - // validate and insert current epoch + setups = append(setups, setup) + commits = append(commits, commit) + } + + { // validate and insert current epoch (always exist) setup := richEntry.CurrentEpochSetup commit := richEntry.CurrentEpochCommit @@ -526,115 +637,111 @@ func bootstrapEpochForProtocolStateEntry( setups = append(setups, setup) commits = append(commits, commit) + } - // validate and insert next epoch, if it exists - if richEntry.NextEpoch != nil { - setup := richEntry.NextEpochSetup // must not be nil - commit := richEntry.NextEpochCommit // may be nil + // validate and insert next epoch, if it exists + if richEntry.NextEpoch != nil { + setup := richEntry.NextEpochSetup // must not be nil + commit := richEntry.NextEpochCommit // may be nil - if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { - return fmt.Errorf("invalid EpochSetup for next epoch: %w", err) - } - setups = append(setups, setup) - - if commit != nil { - if err := protocol.IsValidEpochCommit(commit, setup); err != nil { - return fmt.Errorf("invalid EpochCommit for next epoch: %w", err) - 
} - commits = append(commits, commit) - } + if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { + return fmt.Errorf("invalid EpochSetup for next epoch: %w", err) } + setups = append(setups, setup) - // insert all epoch setup/commit service events - // dynamic protocol state relies on these events being stored - for _, setup := range setups { - err := epochSetups.StoreTx(setup)(tx) - if err != nil { - return fmt.Errorf("could not store epoch setup event: %w", err) - } - } - for _, commit := range commits { - err := epochCommits.StoreTx(commit)(tx) - if err != nil { - return fmt.Errorf("could not store epoch commit event: %w", err) + if commit != nil { + if err := protocol.IsValidEpochCommit(commit, setup); err != nil { + return fmt.Errorf("invalid EpochCommit for next epoch: %w", err) } + commits = append(commits, commit) } + } - // insert epoch protocol state entry, which references above service events - err := operation.SkipDuplicatesTx(epochProtocolStateSnapshots.StoreTx(richEntry.ID(), richEntry.MinEpochStateEntry))(tx) + // insert all epoch setup/commit service events + // dynamic protocol state relies on these events being stored + for _, setup := range setups { + err := epochSetups.BatchStore(rw, setup) if err != nil { - return fmt.Errorf("could not store epoch protocol state entry: %w", err) + return fmt.Errorf("could not store epoch setup event: %w", err) } - return nil } + for _, commit := range commits { + err := epochCommits.BatchStore(rw, commit) + if err != nil { + return fmt.Errorf("could not store epoch commit event: %w", err) + } + } + + // insert epoch protocol state entry, which references above service events + err := epochProtocolStateSnapshots.BatchStore(rw.Writer(), richEntry.ID(), richEntry.MinEpochStateEntry) + if err != nil { + return fmt.Errorf("could not store epoch protocol state entry: %w", err) + } + return nil } // bootstrapSporkInfo bootstraps the protocol state with information about the // spork which is used to disambiguate Flow networks. -func bootstrapSporkInfo(root protocol.Snapshot) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - bdtx := tx.DBTxn // tx is just a wrapper around a badger transaction with the additional ability to register callbacks that are executed after the badger transaction completed _successfully_ - - params := root.Params() - sporkID := params.SporkID() - err := operation.InsertSporkID(sporkID)(bdtx) - if err != nil { - return fmt.Errorf("could not insert spork ID: %w", err) - } - - sporkRootBlockHeight := params.SporkRootBlockHeight() - err = operation.InsertSporkRootBlockHeight(sporkRootBlockHeight)(bdtx) - if err != nil { - return fmt.Errorf("could not insert spork root block height: %w", err) - } +func bootstrapSporkInfo(rw storage.ReaderBatchWriter, root protocol.Snapshot) error { + w := rw.Writer() + params := root.Params() + sporkID := params.SporkID() + err := operation.InsertSporkID(w, sporkID) + if err != nil { + return fmt.Errorf("could not insert spork ID: %w", err) + } - return nil + sporkRootBlockHeight := params.SporkRootBlockHeight() + err = operation.InsertSporkRootBlockHeight(w, sporkRootBlockHeight) + if err != nil { + return fmt.Errorf("could not insert spork root block height: %w", err) } + + return nil } // indexEpochHeights populates the epoch height index from the root snapshot. 
// We index the FirstHeight for every epoch where the transition occurs within the sealing segment of the root snapshot, // or for the first epoch of a spork if the snapshot is a spork root snapshot (1 block sealing segment). // No errors are expected during normal operation. -func indexEpochHeights(segment *flow.SealingSegment) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - // CASE 1: For spork root snapshots, there is exactly one block B and one epoch E. - // Index `E.counter → B.Height`. - if segment.IsSporkRoot() { - counter := segment.LatestProtocolStateEntry().EpochEntry.EpochCounter() - firstHeight := segment.Highest().Header.Height - err := operation.InsertEpochFirstHeight(counter, firstHeight)(tx) - if err != nil { - return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, counter, err) - } - return nil +func indexEpochHeights(lctx lockctx.Proof, rw storage.ReaderBatchWriter, segment *flow.SealingSegment) error { + // CASE 1: For spork root snapshots, there is exactly one block B and one epoch E. + // Index `E.counter → B.Height`. + if segment.IsSporkRoot() { + counter := segment.LatestProtocolStateEntry().EpochEntry.EpochCounter() + firstHeight := segment.Highest().Header.Height + err := operation.InsertEpochFirstHeight(lctx, rw, counter, firstHeight) + if err != nil { + return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, counter, err) } + return nil + } - // CASE 2: For all other snapshots, there is a segment of blocks which may span several epochs. - // We traverse all blocks in the segment in ascending height order. - // If we find two consecutive blocks B1, B2 so that `B1.EpochCounter` != `B2.EpochCounter`, - // then index `B2.EpochCounter → B2.Height`. - allBlocks := segment.AllBlocks() - lastBlock := allBlocks[0] - lastBlockEpochCounter := segment.ProtocolStateEntries[lastBlock.Payload.ProtocolStateID].EpochEntry.EpochCounter() - for _, block := range allBlocks[1:] { - thisBlockEpochCounter := segment.ProtocolStateEntries[block.Payload.ProtocolStateID].EpochEntry.EpochCounter() - if lastBlockEpochCounter != thisBlockEpochCounter { - firstHeight := block.Header.Height - err := operation.InsertEpochFirstHeight(thisBlockEpochCounter, firstHeight)(tx) - if err != nil { - return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, thisBlockEpochCounter, err) - } + // CASE 2: For all other snapshots, there is a segment of blocks which may span several epochs. + // We traverse all blocks in the segment in ascending height order. + // If we find two consecutive blocks B1, B2 so that `B1.EpochCounter` != `B2.EpochCounter`, + // then index `B2.EpochCounter → B2.Height`. 
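To make the CASE 2 traversal described above concrete before the implementing hunk that follows, here is a minimal, self-contained sketch of the transition-detection idea. It deliberately uses simplified, hypothetical types (`block`, `firstHeights`) rather than flow-go's `SealingSegment` or `operation.InsertEpochFirstHeight`, and is illustrative only:

```go
package main

import "fmt"

// block is a simplified stand-in for a sealing-segment block:
// we only need its height and the epoch counter it belongs to.
type block struct {
	Height       uint64
	EpochCounter uint64
}

// firstHeights returns epochCounter -> first height for every epoch whose
// transition occurs inside the given segment (ordered by ascending height).
func firstHeights(segment []block) map[uint64]uint64 {
	out := make(map[uint64]uint64)
	if len(segment) == 0 {
		return out
	}
	last := segment[0]
	for _, b := range segment[1:] {
		if b.EpochCounter != last.EpochCounter {
			// b is the first block of its epoch within this segment
			out[b.EpochCounter] = b.Height
		}
		last = b
	}
	return out
}

func main() {
	segment := []block{
		{Height: 100, EpochCounter: 7},
		{Height: 101, EpochCounter: 7},
		{Height: 102, EpochCounter: 8}, // epoch transition: index 8 -> 102
		{Height: 103, EpochCounter: 8},
	}
	fmt.Println(firstHeights(segment)) // map[8:102]
}
```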
+ allBlocks := segment.AllBlocks() + lastBlock := allBlocks[0] + lastBlockEpochCounter := segment.ProtocolStateEntries[lastBlock.Payload.ProtocolStateID].EpochEntry.EpochCounter() + for _, block := range allBlocks[1:] { + thisBlockEpochCounter := segment.ProtocolStateEntries[block.Payload.ProtocolStateID].EpochEntry.EpochCounter() + if lastBlockEpochCounter != thisBlockEpochCounter { + firstHeight := block.Header.Height + err := operation.InsertEpochFirstHeight(lctx, rw, thisBlockEpochCounter, firstHeight) + if err != nil { + return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, thisBlockEpochCounter, err) } - lastBlockEpochCounter = thisBlockEpochCounter } - return nil + lastBlockEpochCounter = thisBlockEpochCounter } + return nil } func OpenState( metrics module.ComplianceMetrics, - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -653,15 +760,15 @@ func OpenState( if !isBootstrapped { return nil, fmt.Errorf("expected database to contain bootstrapped state") } - globalParams, err := ReadGlobalParams(db) + globalParams, err := datastore.ReadGlobalParams(db.Reader()) if err != nil { return nil, fmt.Errorf("could not read global params: %w", err) } - instanceParams, err := ReadInstanceParams(db, headers, seals) + instanceParams, err := datastore.ReadInstanceParams(db.Reader(), headers, seals) if err != nil { return nil, fmt.Errorf("could not read instance params: %w", err) } - params := &Params{ + params := &datastore.Params{ GlobalParams: globalParams, InstanceParams: instanceParams, } @@ -669,6 +776,7 @@ func OpenState( state, err := newState( metrics, db, + lockManager, headers, seals, results, @@ -685,20 +793,32 @@ func OpenState( return nil, fmt.Errorf("could not create state: %w", err) } - // report last finalized and sealed block height + // report information about latest known finalized block finalSnapshot := state.Final() - head, err := finalSnapshot.Head() + latestFinalizedHeader, err := finalSnapshot.Head() if err != nil { return nil, fmt.Errorf("unexpected error to get finalized block: %w", err) } - metrics.FinalizedHeight(head.Height) + latestFinalizedBlock, err := state.blocks.ByHeight(latestFinalizedHeader.Height) + if err != nil { + return nil, fmt.Errorf("could not retrieve the latest sealed block by height: %w", err) + } + metrics.FinalizedHeight(latestFinalizedHeader.Height) + metrics.BlockFinalized(latestFinalizedBlock) - sealed, err := state.Sealed().Head() + // report information about latest known finalized block + latestSealedHeader, err := state.Sealed().Head() + if err != nil { + return nil, fmt.Errorf("could not get latest sealed block header: %w", err) + } + latestSealedBlock, err := state.blocks.ByHeight(latestSealedHeader.Height) if err != nil { - return nil, fmt.Errorf("could not get latest sealed block: %w", err) + return nil, fmt.Errorf("could not retrieve the latest sealed block by height: %w", err) } - metrics.SealedHeight(sealed.Height) + metrics.SealedHeight(latestSealedHeader.Height) + metrics.BlockSealed(latestSealedBlock) + // report information about latest known epoch err = updateEpochMetrics(metrics, finalSnapshot) if err != nil { return nil, fmt.Errorf("failed to update epoch metrics: %w", err) @@ -772,7 +892,8 @@ func (state *State) AtBlockID(blockID flow.Identifier) protocol.Snapshot { // is expected to contain an already bootstrapped state or not func newState( metrics module.ComplianceMetrics, - db *badger.DB, + db 
storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -786,13 +907,14 @@ func newState( params protocol.Params, ) (*State, error) { state := &State{ - metrics: metrics, - db: db, - headers: headers, - results: results, - seals: seals, - blocks: blocks, - qcs: qcs, + metrics: metrics, + db: db, + lockManager: lockManager, + headers: headers, + results: results, + seals: seals, + blocks: blocks, + qcs: qcs, epoch: struct { setups storage.EpochSetups commits storage.EpochCommits @@ -823,9 +945,9 @@ func newState( } // IsBootstrapped returns whether the database contains a bootstrapped state -func IsBootstrapped(db *badger.DB) (bool, error) { +func IsBootstrapped(db storage.DB) (bool, error) { var finalized uint64 - err := db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(db.Reader(), &finalized) if errors.Is(err, storage.ErrNotFound) { return false, nil } @@ -861,17 +983,15 @@ func updateEpochMetrics(metrics module.ComplianceMetrics, snap protocol.Snapshot // boostrapVersionBeacon bootstraps version beacon, by adding the latest beacon // to an index, if present. -func boostrapVersionBeacon(snapshot protocol.Snapshot) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - versionBeacon, err := snapshot.VersionBeacon() - if err != nil { - return err - } - if versionBeacon == nil { - return nil - } - return operation.IndexVersionBeaconByHeight(versionBeacon)(tx.DBTxn) +func boostrapVersionBeacon(rw storage.ReaderBatchWriter, snapshot protocol.Snapshot) error { + versionBeacon, err := snapshot.VersionBeacon() + if err != nil { + return err } + if versionBeacon == nil { + return nil + } + return operation.IndexVersionBeaconByHeight(rw.Writer(), versionBeacon) } // populateCache is used after opening or bootstrapping the state to populate the cache. @@ -879,47 +999,41 @@ func boostrapVersionBeacon(snapshot protocol.Snapshot) func(*transaction.Tx) err // No errors expected during normal operations. 
func (state *State) populateCache() error { // cache the initial value for finalized block - err := state.db.View(func(tx *badger.Txn) error { - // finalized header - var finalizedHeight uint64 - err := operation.RetrieveFinalizedHeight(&finalizedHeight)(tx) - if err != nil { - return fmt.Errorf("could not lookup finalized height: %w", err) - } - var cachedLatest cachedLatest - err = operation.LookupBlockHeight(finalizedHeight, &cachedLatest.finalizedID)(tx) - if err != nil { - return fmt.Errorf("could not lookup finalized id (height=%d): %w", finalizedHeight, err) - } - cachedLatest.finalizedHeader, err = state.headers.ByBlockID(cachedLatest.finalizedID) - if err != nil { - return fmt.Errorf("could not get finalized block (id=%x): %w", cachedLatest.finalizedID, err) - } - // sealed header - var sealedHeight uint64 - err = operation.RetrieveSealedHeight(&sealedHeight)(tx) - if err != nil { - return fmt.Errorf("could not lookup sealed height: %w", err) - } - err = operation.LookupBlockHeight(sealedHeight, &cachedLatest.sealedID)(tx) - if err != nil { - return fmt.Errorf("could not lookup sealed id (height=%d): %w", sealedHeight, err) - } - cachedLatest.sealedHeader, err = state.headers.ByBlockID(cachedLatest.sealedID) - if err != nil { - return fmt.Errorf("could not get sealed block (id=%x): %w", cachedLatest.sealedID, err) - } - state.cachedLatest.Store(&cachedLatest) - - state.finalizedRootHeight = state.Params().FinalizedRoot().Height - state.sealedRootHeight = state.Params().SealedRoot().Height - state.sporkRootBlockHeight = state.Params().SporkRootBlockHeight() - - return nil - }) + // finalized header + r := state.db.Reader() + var finalizedHeight uint64 + err := operation.RetrieveFinalizedHeight(r, &finalizedHeight) + if err != nil { + return fmt.Errorf("could not lookup finalized height: %w", err) + } + var cachedLatest cachedLatest + err = operation.LookupBlockHeight(r, finalizedHeight, &cachedLatest.finalizedID) if err != nil { - return fmt.Errorf("could not cache finalized header: %w", err) + return fmt.Errorf("could not lookup finalized id (height=%d): %w", finalizedHeight, err) } + cachedLatest.finalizedHeader, err = state.headers.ByBlockID(cachedLatest.finalizedID) + if err != nil { + return fmt.Errorf("could not get finalized block (id=%x): %w", cachedLatest.finalizedID, err) + } + // sealed header + var sealedHeight uint64 + err = operation.RetrieveSealedHeight(r, &sealedHeight) + if err != nil { + return fmt.Errorf("could not lookup sealed height: %w", err) + } + err = operation.LookupBlockHeight(r, sealedHeight, &cachedLatest.sealedID) + if err != nil { + return fmt.Errorf("could not lookup sealed id (height=%d): %w", sealedHeight, err) + } + cachedLatest.sealedHeader, err = state.headers.ByBlockID(cachedLatest.sealedID) + if err != nil { + return fmt.Errorf("could not get sealed block (id=%x): %w", cachedLatest.sealedID, err) + } + state.cachedLatest.Store(&cachedLatest) + + state.finalizedRootHeight = state.Params().FinalizedRoot().Height + state.sealedRootHeight = state.Params().SealedRoot().Height + state.sporkRootBlockHeight = state.Params().SporkRootBlockHeight() return nil } diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index 7b068e39945..a9f591e2598 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -19,8 +19,10 @@ import ( "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/util" protoutil "github.com/onflow/flow-go/state/protocol/util" + 
"github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" storagebadger "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -35,6 +37,7 @@ func TestBootstrapAndOpen(t *testing.T) { }) protoutil.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, _ *bprotocol.State) { + lockManager := storage.NewTestingLockManager() // expect the final view metric to be set to current epoch's final view epoch, err := rootSnapshot.Epochs().Current() require.NoError(t, err) @@ -46,7 +49,9 @@ func TestBootstrapAndOpen(t *testing.T) { complianceMetrics.On("CurrentEpochCounter", counter).Once() complianceMetrics.On("CurrentEpochPhase", phase).Once() complianceMetrics.On("CurrentEpochFinalView", epoch.FinalView()).Once() + complianceMetrics.On("BlockFinalized", testmock.Anything).Once() complianceMetrics.On("FinalizedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockSealed", testmock.Anything).Once() complianceMetrics.On("SealedHeight", testmock.Anything).Once() complianceMetrics.On("CurrentDKGPhaseViews", @@ -57,7 +62,8 @@ func TestBootstrapAndOpen(t *testing.T) { // protocol state has been bootstrapped, now open a protocol state with the database state, err := bprotocol.OpenState( complianceMetrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -108,6 +114,7 @@ func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { }) protoutil.RunWithBootstrapState(t, committedPhaseSnapshot, func(db *badger.DB, _ *bprotocol.State) { + lockManager := storage.NewTestingLockManager() complianceMetrics := new(mock.ComplianceMetrics) @@ -121,19 +128,21 @@ func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { phase, err := committedPhaseSnapshot.EpochPhase() require.NoError(t, err) complianceMetrics.On("CurrentEpochPhase", phase).Once() - complianceMetrics.On("CurrentEpochFinalView", currentEpoch.FinalView()).Once() + complianceMetrics.On("CurrentDKGPhaseViews", currentEpoch.DKGPhase1FinalView(), currentEpoch.DKGPhase2FinalView(), currentEpoch.DKGPhase3FinalView()).Once() - complianceMetrics.On("CurrentDKGPhaseViews", - currentEpoch.DKGPhase1FinalView(), currentEpoch.DKGPhase2FinalView(), currentEpoch.DKGPhase3FinalView()).Once() + // expect finalized and sealed to be set to the latest block complianceMetrics.On("FinalizedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockFinalized", testmock.Anything).Once() complianceMetrics.On("SealedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockSealed", testmock.Anything).Once() noopMetrics := new(metrics.NoopCollector) all := storagebadger.InitAll(noopMetrics, db) state, err := bprotocol.OpenState( complianceMetrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -381,6 +390,96 @@ func TestBootstrapNonRoot(t *testing.T) { }) }) + // should be able to bootstrap from snapshot when the sealing segment contains + // a block which references a result included outside the sealing segment. + // In this case, B2 contains the result for B1, but is omitted from the segment. + // B3 contains only the receipt for B1 and is included in the segment. + // + // Extra Blocks Sealing Segment + // [-----------------------][--------------------------------------] + // ROOT <- B1 <- B2(Receipt1a,Result1) <- B3(Receipt1b) <- ... 
<- G1 <- G2(R[G1]) <- G3(Seal[G1]) + t.Run("with detached execution result reference in sealing segment", func(t *testing.T) { + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + block1 := unittest.BlockWithParentFixture(rootBlock) + block1.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block1) + + receipt1a, seal1 := unittest.ReceiptAndSealForBlock(block1) + receipt1b := unittest.ExecutionReceiptFixture(unittest.WithResult(&receipt1a.ExecutionResult)) + + block2 := unittest.BlockWithParentFixture(block1.Header) + block2.SetPayload(unittest.PayloadFixture( + unittest.WithReceipts(receipt1a), + unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block2) + + block3 := unittest.BlockWithParentFixture(block2.Header) + block3.SetPayload(unittest.PayloadFixture( + unittest.WithReceiptsAndNoResults(receipt1b), + unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block3) + + receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) + receipt3, seal3 := unittest.ReceiptAndSealForBlock(block3) + + receipts := []*flow.ExecutionReceipt{receipt2, receipt3} + seals := []*flow.Seal{seal1, seal2, seal3} + + parent := block3 + for i := 0; i < flow.DefaultTransactionExpiry-1; i++ { + next := unittest.BlockWithParentFixture(parent.Header) + next.SetPayload(unittest.PayloadFixture( + unittest.WithReceipts(receipts[0]), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(next.Header, []*flow.Seal{seals[0]})), + unittest.WithSeals(seals[0]))) + seals, receipts = seals[1:], receipts[1:] + + nextReceipt, nextSeal := unittest.ReceiptAndSealForBlock(next) + receipts = append(receipts, nextReceipt) + seals = append(seals, nextSeal) + buildFinalizedBlock(t, state, next) + parent = next + } + + // G1 adds all receipts from all blocks before G1 + blockG1 := unittest.BlockWithParentFixture(parent.Header) + blockG1.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipts...), unittest.WithProtocolStateID(parent.Payload.ProtocolStateID))) + buildFinalizedBlock(t, state, blockG1) + + receiptS1, sealS1 := unittest.ReceiptAndSealForBlock(blockG1) + + // G2 adds all seals from all blocks before G1 + blockG2 := unittest.BlockWithParentFixture(blockG1.Header) + blockG2.SetPayload(unittest.PayloadFixture( + unittest.WithSeals(seals...), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(blockG2.Header, seals)), + unittest.WithReceipts(receiptS1))) + buildFinalizedBlock(t, state, blockG2) + + // G3 seals G1, creating a sealing segment + blockG3 := unittest.BlockWithParentFixture(blockG2.Header) + blockG3.SetPayload(unittest.PayloadFixture( + unittest.WithSeals(sealS1), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(blockG3.Header, []*flow.Seal{sealS1})))) + buildFinalizedBlock(t, state, blockG3) + + child := unittest.BlockWithParentFixture(blockG3.Header) + child.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(blockG3.Payload.ProtocolStateID))) + buildFinalizedBlock(t, state, child) + + return state.AtBlockID(blockG3.ID()) + }) + + segment, err := after.SealingSegment() + require.NoError(t, err) + // To accurately test the desired edge case we require that the lowest block in ExtraBlocks is B3 + assert.Equal(t, uint64(3), segment.ExtraBlocks[0].Header.Height) + + bootstrap(t, after, func(state *bprotocol.State, 
err error) { + require.NoError(t, err) + }) + }) + // should be able to bootstrap from snapshot after entering EFM because of sealing invalid service event // ROOT <- B1 <- B2(R1) <- B3(S1) <- CHILD t.Run("in EFM", func(t *testing.T) { @@ -719,11 +818,13 @@ func bootstrap(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.S dir := unittest.TempDir(t) defer os.RemoveAll(dir) db := unittest.BadgerDB(t, dir) + lockManager := storage.NewTestingLockManager() defer db.Close() all := bstorage.InitAll(metrics, db) state, err := bprotocol.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, diff --git a/state/protocol/chain_state.go b/state/protocol/chain_state.go index 699026d8380..6f397a27ad7 100644 --- a/state/protocol/chain_state.go +++ b/state/protocol/chain_state.go @@ -53,9 +53,22 @@ type FollowerState interface { // CAUTION: // - This function expects that `qc` has been validated. (otherwise, the state will be corrupted) // - The parent block must already be stored. - // Orphaned blocks are excepted. + // - Attempts to extend the state with the _same block concurrently_ are not allowed. + // (will not corrupt the state, but may lead to an exception) + // + // Aside from the requirement that ancestors must have been previously ingested, all blocks are + // accepted; no matter how old they are; or whether they are orphaned or not. + // + // Note: To ensure that all ancestors of a candidate block are correct and known to the FollowerState, some external + // ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block + // is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the + // Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before + // the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests + // with the same block. Hence, for simplicity, the FollowerState may reject such requests with an exception. // // No errors are expected during normal operations. + // - In case of concurrent calls with the same `candidate` block, `ExtendCertified` may return a [storage.ErrAlreadyExists] + // or it may gracefully return. At the moment, `ExtendCertified` should be considered as not concurrency-safe. ExtendCertified(ctx context.Context, candidate *flow.Block, qc *flow.QuorumCertificate) error // Finalize finalizes the block with the given hash. @@ -79,11 +92,30 @@ type ParticipantState interface { // still checking that the given block is a valid extension of the protocol state. // The candidate block must have passed HotStuff validation before being passed to Extend. // - // CAUTION: per convention, the protocol state requires that the candidate's - // parent has already been ingested. Otherwise, an exception is returned. + // CAUTION: + // - per convention, the protocol state requires that the candidate's + // parent has already been ingested. Otherwise, an exception is returned. + // - Attempts to extend the state with the _same block concurrently_ are not allowed. + // (will not corrupt the state, but may lead to an exception) + // - We reject orphaned blocks with [state.OutdatedExtensionError] ! + // This is more performant, but requires careful handling by the calling code. Specifically, + // the caller should not just drop orphaned blocks from the cache to avoid wasteful re-requests. 
+ // If we were to entirely forget orphaned blocks, e.g. block X of the orphaned fork X ← Y ← Z, + // we might not have enough information to reject blocks Y, Z later if we receive them. We would + // re-request X, then determine it is orphaned and drop it, attempt to ingest Y re-request the + // unknown parent X and repeat potentially very often. + // + // Note: To ensure that all ancestors of a candidate block are correct and known to the Protocol State, some external + // ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block + // is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the + // Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before + // the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests + // with the same block. Hence, for simplicity, the FollowerState may reject such requests with an exception. // // Expected errors during normal operations: - // * state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) + // * state.OutdatedExtensionError if the candidate block is orphaned // * state.InvalidExtensionError if the candidate block is invalid + // * In case of concurrent calls with the same `candidate` block, `Extend` may return a [storage.ErrAlreadyExists] + // or it may gracefully return. At the moment, `Extend` should be considered as not concurrency-safe. Extend(ctx context.Context, candidate *flow.Block) error } diff --git a/state/protocol/datastore/params.go b/state/protocol/datastore/params.go new file mode 100644 index 00000000000..975522130ff --- /dev/null +++ b/state/protocol/datastore/params.go @@ -0,0 +1,158 @@ +package datastore + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type Params struct { + protocol.GlobalParams + protocol.InstanceParams +} + +var _ protocol.Params = (*Params)(nil) + +// InstanceParams implements the interface [protocol.InstanceParams]. All values +// are cached after construction and do not incur database reads. +type InstanceParams struct { + // finalizedRoot marks the cutoff of the history this node knows about. It is the block at the tip + // of the root snapshot used to bootstrap this node - all newer blocks are synced from the network. + finalizedRoot *flow.Header + // sealedRoot is the latest sealed block with respect to `finalizedRoot`. + sealedRoot *flow.Header + // rootSeal is the seal for block `sealedRoot` - the newest incorporated seal with respect to `finalizedRoot`. + rootSeal *flow.Seal +} + +var _ protocol.InstanceParams = (*InstanceParams)(nil) + +// ReadInstanceParams reads the instance parameters from the database and returns them as in-memory representation. +// No errors are expected during normal operation. +func ReadInstanceParams(r storage.Reader, headers storage.Headers, seals storage.Seals) (*InstanceParams, error) { + params := &InstanceParams{} + + // The values below are written during bootstrapping and immutable for the lifetime of the node. All + // following parameters are uniquely defined by the values initially read. No atomicity is required. 
+ var ( + finalizedRootHeight uint64 // height of the highest finalized block contained in the root snapshot + sealedRootHeight uint64 // height of the highest sealed block contained in the root snapshot + ) + + // root height + err := operation.RetrieveRootHeight(r, &finalizedRootHeight) + if err != nil { + return nil, fmt.Errorf("could not read root block to populate cache: %w", err) + } + // sealed root height + err = operation.RetrieveSealedRootHeight(r, &sealedRootHeight) + if err != nil { + return nil, fmt.Errorf("could not read sealed root block to populate cache: %w", err) + } + + // look up 'finalized root block' + var finalizedRootID flow.Identifier + err = operation.LookupBlockHeight(r, finalizedRootHeight, &finalizedRootID) + if err != nil { + return nil, fmt.Errorf("could not look up finalized root height: %w", err) + } + params.finalizedRoot, err = headers.ByBlockID(finalizedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve finalized root header: %w", err) + } + + // look up the sealed block as of the 'finalized root block' + var sealedRootID flow.Identifier + err = operation.LookupBlockHeight(r, sealedRootHeight, &sealedRootID) + if err != nil { + return nil, fmt.Errorf("could not look up sealed root height: %w", err) + } + params.sealedRoot, err = headers.ByBlockID(sealedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve sealed root header: %w", err) + } + + // retrieve the root seal + params.rootSeal, err = seals.HighestInFork(finalizedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve root seal: %w", err) + } + + return params, nil +} + +// FinalizedRoot returns the finalized root header of the current protocol state. This will be +// the head of the protocol state snapshot used to bootstrap this state and +// may differ from node to node for the same protocol state. +func (p *InstanceParams) FinalizedRoot() *flow.Header { + return p.finalizedRoot +} + +// SealedRoot returns the sealed root block. If it's different from FinalizedRoot() block, +// it means the node is bootstrapped from mid-spork. +func (p *InstanceParams) SealedRoot() *flow.Header { + return p.sealedRoot +} + +// Seal returns the root block seal of the current protocol state. This is the seal for the +// `SealedRoot` block that was used to bootstrap this state. It may differ from node to node. +func (p *InstanceParams) Seal() *flow.Seal { + return p.rootSeal +} + +// ReadGlobalParams reads the global parameters from the database and returns them as in-memory representation. +// No errors are expected during normal operation. +func ReadGlobalParams(r storage.Reader) (*inmem.Params, error) { + var sporkID flow.Identifier + err := operation.RetrieveSporkID(r, &sporkID) + if err != nil { + return nil, fmt.Errorf("could not get spork id: %w", err) + } + + var sporkRootBlockHeight uint64 + err = operation.RetrieveSporkRootBlockHeight(r, &sporkRootBlockHeight) + if err != nil { + return nil, fmt.Errorf("could not get spork root block height: %w", err) + } + + root, err := ReadFinalizedRoot(r) // retrieve root header + if err != nil { + return nil, fmt.Errorf("could not get root: %w", err) + } + + return inmem.NewParams( + inmem.EncodableParams{ + ChainID: root.ChainID, + SporkID: sporkID, + SporkRootBlockHeight: sporkRootBlockHeight, + }, + ), nil +} + +// ReadFinalizedRoot retrieves the root block's header from the database. +// This information is immutable for the runtime of the software and may be cached. 
+func ReadFinalizedRoot(r storage.Reader) (*flow.Header, error) { + // The values below are written during bootstrapping and immutable for the lifetime of the node. All + // following parameters are uniquely defined by the values initially read. No atomicity is required. + var finalizedRootHeight uint64 + var rootID flow.Identifier + err := operation.RetrieveRootHeight(r, &finalizedRootHeight) + if err != nil { + return nil, fmt.Errorf("could not retrieve finalized root height: %w", err) + } + err = operation.LookupBlockHeight(r, finalizedRootHeight, &rootID) // look up root block ID + if err != nil { + return nil, fmt.Errorf("could not retrieve root header's ID by height: %w", err) + } + + var rootHeader flow.Header + err = operation.RetrieveHeader(r, rootID, &rootHeader) // retrieve root header + if err != nil { + return nil, fmt.Errorf("could not retrieve root header: %w", err) + } + return &rootHeader, nil +} diff --git a/state/protocol/badger/validity.go b/state/protocol/datastore/validity.go similarity index 99% rename from state/protocol/badger/validity.go rename to state/protocol/datastore/validity.go index e710009639b..f3978786215 100644 --- a/state/protocol/badger/validity.go +++ b/state/protocol/datastore/validity.go @@ -1,4 +1,4 @@ -package badger +package datastore import ( "fmt" diff --git a/state/protocol/badger/validity_test.go b/state/protocol/datastore/validity_test.go similarity index 99% rename from state/protocol/badger/validity_test.go rename to state/protocol/datastore/validity_test.go index 3a9bb04d9c1..b9a3db5e135 100644 --- a/state/protocol/badger/validity_test.go +++ b/state/protocol/datastore/validity_test.go @@ -1,4 +1,4 @@ -package badger +package datastore import ( "testing" diff --git a/state/protocol/inmem/convert.go b/state/protocol/inmem/convert.go index 3e51b35cc9c..5dad5a4208e 100644 --- a/state/protocol/inmem/convert.go +++ b/state/protocol/inmem/convert.go @@ -122,7 +122,10 @@ func SnapshotFromBootstrapStateWithParams( SporkRootBlockHeight: root.Header.Height, // use root block height as the spork root block height } - rootMinEpochState := EpochProtocolStateFromServiceEvents(setup, commit) + rootMinEpochState, err := EpochProtocolStateFromServiceEvents(setup, commit) + if err != nil { + return nil, fmt.Errorf("could not construct epoch protocol state: %w", err) + } rootEpochStateID := rootMinEpochState.ID() rootKvStore, err := kvStoreFactory(rootEpochStateID) if err != nil { @@ -137,7 +140,17 @@ func SnapshotFromBootstrapStateWithParams( return nil, fmt.Errorf("could not encode kvstore: %w", err) } - rootEpochState, err := flow.NewEpochStateEntry(rootMinEpochState, nil, nil, setup, commit, nil, nil) + rootEpochState, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: rootMinEpochState, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + CurrentEpochSetup: setup, + CurrentEpochCommit: commit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + ) if err != nil { return nil, fmt.Errorf("could not construct root epoch state entry: %w", err) } @@ -183,7 +196,7 @@ func SnapshotFromBootstrapStateWithParams( // that happened before should be reflected in the EpochSetup event. Specifically, ejected // nodes should be no longer listed in the EpochSetup event. // Hence, when the EpochSetup event is emitted / processed, the ejected flag is false for all epoch participants. 
-func EpochProtocolStateFromServiceEvents(setup *flow.EpochSetup, commit *flow.EpochCommit) *flow.MinEpochStateEntry { +func EpochProtocolStateFromServiceEvents(setup *flow.EpochSetup, commit *flow.EpochCommit) (*flow.MinEpochStateEntry, error) { identities := make(flow.DynamicIdentityEntryList, 0, len(setup.Participants)) for _, identity := range setup.Participants { identities = append(identities, &flow.DynamicIdentityEntry{ @@ -191,14 +204,24 @@ func EpochProtocolStateFromServiceEvents(setup *flow.EpochSetup, commit *flow.Ep Ejected: false, }) } - return &flow.MinEpochStateEntry{ - PreviousEpoch: nil, - CurrentEpoch: flow.EpochStateContainer{ + currentEpoch, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ SetupID: setup.ID(), CommitID: commit.ID(), ActiveIdentities: identities, + EpochExtensions: nil, }, - NextEpoch: nil, - EpochFallbackTriggered: false, + ) + if err != nil { + return nil, fmt.Errorf("could not construct current epoch state: %w", err) } + + return flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: *currentEpoch, + NextEpoch: nil, + EpochFallbackTriggered: false, + }, + ) } diff --git a/state/protocol/inmem/epoch.go b/state/protocol/inmem/epoch.go index 2faecc46364..790f46d6982 100644 --- a/state/protocol/inmem/epoch.go +++ b/state/protocol/inmem/epoch.go @@ -235,11 +235,14 @@ func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { } rootBlock := cluster.CanonicalRootBlock(epochCounter, members) - rootQC := &flow.QuorumCertificate{ + rootQC, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: rootBlock.Header.View, BlockID: rootBlock.ID(), SignerIndices: signerIndices, SigData: rootQCVoteData.SigData, + }) + if err != nil { + return nil, fmt.Errorf("could not build root quorum certificate: %w", err) } cluster, err := ClusterFromEncodable(EncodableCluster{ diff --git a/state/protocol/inmem/epoch_protocol_state.go b/state/protocol/inmem/epoch_protocol_state.go index 0f0022d484a..11d315638ac 100644 --- a/state/protocol/inmem/epoch_protocol_state.go +++ b/state/protocol/inmem/epoch_protocol_state.go @@ -8,18 +8,41 @@ import ( ) // EpochProtocolStateAdapter implements protocol.EpochProtocolState by wrapping a flow.RichEpochStateEntry. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochProtocolStateAdapter struct { *flow.RichEpochStateEntry - params protocol.GlobalParams + Params protocol.GlobalParams } var _ protocol.EpochProtocolState = (*EpochProtocolStateAdapter)(nil) -func NewEpochProtocolStateAdapter(entry *flow.RichEpochStateEntry, params protocol.GlobalParams) *EpochProtocolStateAdapter { - return &EpochProtocolStateAdapter{ - RichEpochStateEntry: entry, - params: params, +// UntrustedEpochProtocolStateAdapter is an untrusted input-only representation of a EpochProtocolStateAdapter, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochProtocolStateAdapter should be validated and converted into +// a trusted EpochProtocolStateAdapter using NewEpochProtocolStateAdapter constructor. +type UntrustedEpochProtocolStateAdapter EpochProtocolStateAdapter + +// NewEpochProtocolStateAdapter creates a new instance of EpochProtocolStateAdapter. +// Construction EpochProtocolStateAdapter allowed only within the constructor. 
+// +// All errors indicate a valid EpochProtocolStateAdapter cannot be constructed from the input. +func NewEpochProtocolStateAdapter(untrusted UntrustedEpochProtocolStateAdapter) (*EpochProtocolStateAdapter, error) { + if untrusted.Params == nil { + return nil, fmt.Errorf("params must not be nil") + } + if untrusted.RichEpochStateEntry == nil { + return nil, fmt.Errorf("rich epoch state must not be nil") } + return &EpochProtocolStateAdapter{ + RichEpochStateEntry: untrusted.RichEpochStateEntry, + Params: untrusted.Params, + }, nil } // Epoch returns the current epoch counter. @@ -69,7 +92,7 @@ func (s *EpochProtocolStateAdapter) Identities() flow.IdentityList { // GlobalParams returns spork-scoped global network parameters. func (s *EpochProtocolStateAdapter) GlobalParams() protocol.GlobalParams { - return s.params + return s.Params } // EpochFallbackTriggered denotes whether an invalid epoch state transition was attempted diff --git a/state/protocol/inmem/epoch_protocol_state_test.go b/state/protocol/inmem/epoch_protocol_state_test.go index a710ccd4352..60a478dc06f 100644 --- a/state/protocol/inmem/epoch_protocol_state_test.go +++ b/state/protocol/inmem/epoch_protocol_state_test.go @@ -21,7 +21,13 @@ func TestEpochProtocolStateAdapter(t *testing.T) { entry := unittest.EpochStateFixture(unittest.WithValidDKG()) globalParams := mock.NewGlobalParams(t) - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) t.Run("clustering", func(t *testing.T) { clustering, err := inmem.ClusteringFromSetupEvent(entry.CurrentEpochSetup) @@ -69,7 +75,13 @@ func TestEpochProtocolStateAdapter(t *testing.T) { }) t.Run("epoch-phase-staking", func(t *testing.T) { entry := unittest.EpochStateFixture() - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.Equal(t, flow.EpochPhaseStaking, adapter.EpochPhase()) assert.True(t, adapter.PreviousEpochExists()) assert.False(t, adapter.EpochFallbackTriggered()) @@ -78,15 +90,28 @@ func TestEpochProtocolStateAdapter(t *testing.T) { entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) // cleanup the commit event, so we are in setup phase entry.NextEpoch.CommitID = flow.ZeroID + entry.NextEpochCommit = nil - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.Equal(t, flow.EpochPhaseSetup, adapter.EpochPhase()) assert.True(t, adapter.PreviousEpochExists()) assert.False(t, adapter.EpochFallbackTriggered()) }) t.Run("epoch-phase-commit", func(t *testing.T) { entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.Equal(t, flow.EpochPhaseCommitted, adapter.EpochPhase()) assert.True(t, adapter.PreviousEpochExists()) assert.False(t, adapter.EpochFallbackTriggered()) @@ 
-96,7 +121,13 @@ func TestEpochProtocolStateAdapter(t *testing.T) { entry := unittest.EpochStateFixture(func(entry *flow.RichEpochStateEntry) { entry.EpochFallbackTriggered = true }) - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.True(t, adapter.EpochFallbackTriggered()) assert.Equal(t, flow.EpochPhaseFallback, entry.EpochPhase()) }) @@ -104,7 +135,13 @@ func TestEpochProtocolStateAdapter(t *testing.T) { entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { entry.EpochFallbackTriggered = true }) - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.True(t, adapter.EpochFallbackTriggered()) assert.Equal(t, flow.EpochPhaseCommitted, entry.EpochPhase()) }) @@ -115,7 +152,37 @@ func TestEpochProtocolStateAdapter(t *testing.T) { entry.PreviousEpochSetup = nil entry.PreviousEpochCommit = nil }) - adapter := inmem.NewEpochProtocolStateAdapter(entry, globalParams) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) assert.False(t, adapter.PreviousEpochExists()) }) + + // Invalid input with nil Params + t.Run("invalid - nil Params", func(t *testing.T) { + _, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: unittest.EpochStateFixture(), + Params: nil, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "params must not be nil") + }) + + // Invalid input with nil RichEpochStateEntry + t.Run("invalid - nil RichEpochStateEntry", func(t *testing.T) { + _, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: nil, + Params: globalParams, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "rich epoch state must not be nil") + }) } diff --git a/state/protocol/inmem/snapshot.go b/state/protocol/inmem/snapshot.go index ff1654262ae..9ec2293741a 100644 --- a/state/protocol/inmem/snapshot.go +++ b/state/protocol/inmem/snapshot.go @@ -106,7 +106,12 @@ func (s Snapshot) Encodable() EncodableSnapshot { func (s Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { entry := s.enc.SealingSegment.LatestProtocolStateEntry() - return NewEpochProtocolStateAdapter(entry.EpochEntry, s.Params()), nil + return NewEpochProtocolStateAdapter( + UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry.EpochEntry, + Params: s.Params(), + }, + ) } func (s Snapshot) ProtocolState() (protocol.KVStoreReader, error) { diff --git a/state/protocol/kvstore_test.go b/state/protocol/kvstore_test.go index c4d01fedffd..f518e18106a 100644 --- a/state/protocol/kvstore_test.go +++ b/state/protocol/kvstore_test.go @@ -4,8 +4,8 @@ import ( "math/rand" "testing" + "github.com/ethereum/go-ethereum/rlp" clone "github.com/huandu/go-clone/generic" - "github.com/onflow/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/state/protocol/mock/mutable_protocol_state.go b/state/protocol/mock/mutable_protocol_state.go index 
69885d1243b..8955b6c758e 100644 --- a/state/protocol/mock/mutable_protocol_state.go +++ b/state/protocol/mock/mutable_protocol_state.go @@ -4,11 +4,11 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" + deferred "github.com/onflow/flow-go/storage/deferred" + mock "github.com/stretchr/testify/mock" protocol "github.com/onflow/flow-go/state/protocol" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // MutableProtocolState is an autogenerated mock type for the MutableProtocolState type @@ -46,43 +46,34 @@ func (_m *MutableProtocolState) EpochStateAtBlockID(blockID flow.Identifier) (pr return r0, r1 } -// EvolveState provides a mock function with given fields: parentBlockID, candidateView, candidateSeals -func (_m *MutableProtocolState) EvolveState(parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (flow.Identifier, *transaction.DeferredBlockPersist, error) { - ret := _m.Called(parentBlockID, candidateView, candidateSeals) +// EvolveState provides a mock function with given fields: deferredDBOps, parentBlockID, candidateView, candidateSeals +func (_m *MutableProtocolState) EvolveState(deferredDBOps *deferred.DeferredBlockPersist, parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (flow.Identifier, error) { + ret := _m.Called(deferredDBOps, parentBlockID, candidateView, candidateSeals) if len(ret) == 0 { panic("no return value specified for EvolveState") } var r0 flow.Identifier - var r1 *transaction.DeferredBlockPersist - var r2 error - if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, []*flow.Seal) (flow.Identifier, *transaction.DeferredBlockPersist, error)); ok { - return rf(parentBlockID, candidateView, candidateSeals) + var r1 error + if rf, ok := ret.Get(0).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) (flow.Identifier, error)); ok { + return rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) } - if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, []*flow.Seal) flow.Identifier); ok { - r0 = rf(parentBlockID, candidateView, candidateSeals) + if rf, ok := ret.Get(0).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) flow.Identifier); ok { + r0 = rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(flow.Identifier) } } - if rf, ok := ret.Get(1).(func(flow.Identifier, uint64, []*flow.Seal) *transaction.DeferredBlockPersist); ok { - r1 = rf(parentBlockID, candidateView, candidateSeals) + if rf, ok := ret.Get(1).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) error); ok { + r1 = rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*transaction.DeferredBlockPersist) - } - } - - if rf, ok := ret.Get(2).(func(flow.Identifier, uint64, []*flow.Seal) error); ok { - r2 = rf(parentBlockID, candidateView, candidateSeals) - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } // GlobalParams provides a mock function with no fields diff --git a/state/protocol/protocol_state.go b/state/protocol/protocol_state.go index d5ce678b557..a570ead3d6c 100644 --- a/state/protocol/protocol_state.go +++ b/state/protocol/protocol_state.go @@ -2,7 +2,7 @@ package protocol import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" ) // EpochProtocolState represents 
the subset of the Protocol State KVStore related to epochs: @@ -163,5 +163,5 @@ type MutableProtocolState interface { // - A consistency or sanity check failing within the StateMutator is likely the symptom of an internal bug // in the node software or state corruption, i.e. case (b). This is the only scenario where the error return // of this function is not nil. If such an exception is returned, continuing is not an option. - EvolveState(parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (stateID flow.Identifier, dbUpdates *transaction.DeferredBlockPersist, err error) + EvolveState(deferredDBOps *deferred.DeferredBlockPersist, parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (stateID flow.Identifier, err error) } diff --git a/state/protocol/protocol_state/common/base_statemachine.go b/state/protocol/protocol_state/common/base_statemachine.go index e3de0d3e08f..c85c2e4fdff 100644 --- a/state/protocol/protocol_state/common/base_statemachine.go +++ b/state/protocol/protocol_state/common/base_statemachine.go @@ -3,7 +3,7 @@ package common import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/protocol_state" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" ) // BaseKeyValueStoreStateMachine implements a subset of the KeyValueStoreStateMachine interface which is usually common @@ -29,8 +29,8 @@ func NewBaseKeyValueStoreStateMachine( } // Build is a no-op by default. If a state machine needs to persist data, it should override this method. -func (m *BaseKeyValueStoreStateMachine) Build() (*transaction.DeferredBlockPersist, error) { - return transaction.NewDeferredBlockPersist(), nil +func (m *BaseKeyValueStoreStateMachine) Build() (*deferred.DeferredBlockPersist, error) { + return deferred.NewDeferredBlockPersist(), nil } // View returns the view associated with this state machine. 
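The hunks before and after this point repeatedly apply the construction pattern this change introduces: instead of assembling structs field by field, call sites pass an exported `Untrusted<Type>` value with named fields to a validating `New<Type>` constructor (e.g. `flow.NewMinEpochStateEntry`, `flow.NewEpochStateEntry`, `flow.NewEpochStateContainer`, `inmem.NewEpochProtocolStateAdapter`). A minimal sketch of that pattern, using a hypothetical `Thing` type that is not part of flow-go, might look like this:

```go
package main

import (
	"errors"
	"fmt"
)

// Thing is the trusted, validated representation.
// Mutations are allowed only within the constructor.
type Thing struct {
	id    string
	value uint64
}

// UntrustedThing is an input-only representation used for construction.
// Named fields make call sites explicit and prevent argument-ordering mistakes.
type UntrustedThing struct {
	ID    string
	Value uint64
}

// NewThing validates the untrusted input and returns a trusted Thing.
// All errors indicate that a valid Thing cannot be constructed from the input.
func NewThing(u UntrustedThing) (*Thing, error) {
	if u.ID == "" {
		return nil, errors.New("id must not be empty")
	}
	return &Thing{id: u.ID, value: u.Value}, nil
}

func main() {
	t, err := NewThing(UntrustedThing{ID: "example", Value: 42})
	if err != nil {
		panic(err)
	}
	fmt.Println(t.id, t.value)
}
```

The named-field input struct keeps construction sites explicit, and the constructor centralizes validation, so a zero or partially-populated value cannot silently escape into trusted code paths.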
diff --git a/state/protocol/protocol_state/epochs/base_statemachine.go b/state/protocol/protocol_state/epochs/base_statemachine.go index ea125ef67db..f9205c0f680 100644 --- a/state/protocol/protocol_state/epochs/base_statemachine.go +++ b/state/protocol/protocol_state/epochs/base_statemachine.go @@ -106,19 +106,32 @@ func (u *baseStateMachine) TransitionToNextEpoch() error { if u.view < u.state.NextEpochSetup.FirstView { return fmt.Errorf("epoch transition is only allowed when entering next epoch") } - u.state = &flow.EpochStateEntry{ - MinEpochStateEntry: &flow.MinEpochStateEntry{ + minEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ PreviousEpoch: &u.state.CurrentEpoch, CurrentEpoch: *u.state.NextEpoch, NextEpoch: nil, EpochFallbackTriggered: u.state.EpochFallbackTriggered, }, - PreviousEpochSetup: u.state.CurrentEpochSetup, - PreviousEpochCommit: u.state.CurrentEpochCommit, - CurrentEpochSetup: u.state.NextEpochSetup, - CurrentEpochCommit: u.state.NextEpochCommit, - NextEpochSetup: nil, - NextEpochCommit: nil, + ) + if err != nil { + return fmt.Errorf("could not create min epoch state: %w", err) + } + + u.state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minEpochStateEntry, + PreviousEpochSetup: u.state.CurrentEpochSetup, + PreviousEpochCommit: u.state.CurrentEpochCommit, + CurrentEpochSetup: u.state.NextEpochSetup, + CurrentEpochCommit: u.state.NextEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + ) + if err != nil { + return fmt.Errorf("could not construct epoch state entry: %w", err) } + return nil } diff --git a/state/protocol/protocol_state/epochs/fallback_statemachine.go b/state/protocol/protocol_state/epochs/fallback_statemachine.go index 6456ae7f91c..d271e49df86 100644 --- a/state/protocol/protocol_state/epochs/fallback_statemachine.go +++ b/state/protocol/protocol_state/epochs/fallback_statemachine.go @@ -32,6 +32,11 @@ func NewFallbackStateMachine( ) (*FallbackStateMachine, error) { state := parentEpochState.EpochStateEntry.Copy() nextEpochCommitted := state.EpochPhase() == flow.EpochPhaseCommitted + + nextEpoch := state.NextEpoch + nextEpochSetup := state.NextEpochSetup + nextEpochCommit := state.NextEpochCommit + // we are entering fallback mode, this logic needs to be executed only once if !state.EpochFallbackTriggered { // The next epoch has not been committed. Though setup event may be in the state, make sure it is cleared. @@ -41,9 +46,38 @@ func NewFallbackStateMachine( // we go through with that committed epoch. Otherwise, we have tentative values of an epoch // not yet properly specified, which we have to clear out. 
if !nextEpochCommitted { - state.NextEpoch = nil + nextEpoch = nil + // update corresponding service events + nextEpochSetup = nil + nextEpochCommit = nil + } + + minEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: state.PreviousEpoch, + CurrentEpoch: state.CurrentEpoch, + NextEpoch: nextEpoch, + EpochFallbackTriggered: true, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not create min epoch state: %w", err) + } + + state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minEpochStateEntry, + PreviousEpochSetup: state.PreviousEpochSetup, + PreviousEpochCommit: state.PreviousEpochCommit, + CurrentEpochSetup: state.CurrentEpochSetup, + CurrentEpochCommit: state.CurrentEpochCommit, + NextEpochSetup: nextEpochSetup, + NextEpochCommit: nextEpochCommit, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not create epoch state entry: %w", err) } - state.EpochFallbackTriggered = true } base, err := newBaseStateMachine(telemetry, view, parentEpochState, state) @@ -91,7 +125,46 @@ func (m *FallbackStateMachine) extendCurrentEpoch(epochExtension flow.EpochExten return fmt.Errorf("cannot extend current epoch when next epoch is present") } - state.CurrentEpoch.EpochExtensions = append(state.CurrentEpoch.EpochExtensions, epochExtension) + epochExtensions := append(state.CurrentEpoch.EpochExtensions, epochExtension) + currentEpoch, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: state.CurrentEpoch.SetupID, + CommitID: state.CurrentEpoch.CommitID, + ActiveIdentities: state.CurrentEpoch.ActiveIdentities, + EpochExtensions: epochExtensions, + }, + ) + if err != nil { + return fmt.Errorf("could not construct current epoch state: %w", err) + } + + newMinEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: state.PreviousEpoch, + CurrentEpoch: *currentEpoch, + NextEpoch: state.NextEpoch, + EpochFallbackTriggered: state.EpochFallbackTriggered, + }, + ) + if err != nil { + return fmt.Errorf("could not create min epoch state: %w", err) + } + + m.state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: newMinEpochStateEntry, + PreviousEpochSetup: m.state.PreviousEpochSetup, + PreviousEpochCommit: m.state.PreviousEpochCommit, + CurrentEpochSetup: m.state.CurrentEpochSetup, + CurrentEpochCommit: m.state.CurrentEpochCommit, + NextEpochSetup: m.state.NextEpochSetup, + NextEpochCommit: m.state.NextEpochCommit, + }, + ) + if err != nil { + return fmt.Errorf("could not construct epoch state entry: %w", err) + } + return nil } @@ -186,14 +259,23 @@ func (m *FallbackStateMachine) ProcessEpochRecover(epochRecover *flow.EpochRecov m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), fmt.Errorf("rejecting EpochRecover event: %w", err)) return false, nil } - nextEpoch = &flow.EpochStateContainer{ - SetupID: epochRecover.EpochSetup.ID(), - CommitID: epochRecover.EpochCommit.ID(), - ActiveIdentities: nextEpochParticipants, - EpochExtensions: nil, + nextEpochState, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: epochRecover.EpochSetup.ID(), + CommitID: epochRecover.EpochCommit.ID(), + ActiveIdentities: nextEpochParticipants, + EpochExtensions: nil, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct next epoch state: %w", err) } - err = m.ejector.TrackDynamicIdentityList(nextEpoch.ActiveIdentities) + // update corresponding 
service events + nextEpochSetup := epochRecover.EpochSetup + nextEpochCommit := epochRecover.EpochCommit + + err = m.ejector.TrackDynamicIdentityList(nextEpochState.ActiveIdentities) if err != nil { if protocol.IsInvalidServiceEventError(err) { m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), fmt.Errorf("rejecting EpochRecover event: %w", err)) @@ -202,8 +284,32 @@ func (m *FallbackStateMachine) ProcessEpochRecover(epochRecover *flow.EpochRecov return false, fmt.Errorf("unexpected errors tracking identity list: %w", err) } // if we have processed a valid EpochRecover event, we should exit EFM. - m.state.NextEpoch = nextEpoch - m.state.EpochFallbackTriggered = false + newMinEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: m.state.PreviousEpoch, + CurrentEpoch: m.state.CurrentEpoch, + NextEpoch: nextEpochState, + EpochFallbackTriggered: false, + }, + ) + if err != nil { + return false, fmt.Errorf("could not create min epoch state: %w", err) + } + + m.state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: newMinEpochStateEntry, + PreviousEpochSetup: m.state.PreviousEpochSetup, + PreviousEpochCommit: m.state.PreviousEpochCommit, + CurrentEpochSetup: m.state.CurrentEpochSetup, + CurrentEpochCommit: m.state.CurrentEpochCommit, + NextEpochSetup: &nextEpochSetup, + NextEpochCommit: &nextEpochCommit, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct epoch state entry: %w", err) + } m.telemetry.OnServiceEventProcessed(epochRecover.ServiceEvent()) return true, nil } diff --git a/state/protocol/protocol_state/epochs/happy_path_statemachine.go b/state/protocol/protocol_state/epochs/happy_path_statemachine.go index 96f74f661b0..1ba6de2bcde 100644 --- a/state/protocol/protocol_state/epochs/happy_path_statemachine.go +++ b/state/protocol/protocol_state/epochs/happy_path_statemachine.go @@ -108,12 +108,44 @@ func (u *HappyPathStateMachine) ProcessEpochSetup(epochSetup *flow.EpochSetup) ( } // construct data container specifying next epoch - u.state.NextEpoch = &flow.EpochStateContainer{ - SetupID: epochSetup.ID(), - CommitID: flow.ZeroID, - ActiveIdentities: nextEpochActiveIdentities, + nextEpoch, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: epochSetup.ID(), + CommitID: flow.ZeroID, + ActiveIdentities: nextEpochActiveIdentities, + EpochExtensions: nil, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct next epoch state: %w", err) + } + + newMinEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: u.state.PreviousEpoch, + CurrentEpoch: u.state.CurrentEpoch, + NextEpoch: nextEpoch, + EpochFallbackTriggered: u.state.EpochFallbackTriggered, + }, + ) + if err != nil { + return false, fmt.Errorf("could not create min epoch state: %w", err) + } + + u.state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: newMinEpochStateEntry, + PreviousEpochSetup: u.state.PreviousEpochSetup, + PreviousEpochCommit: u.state.PreviousEpochCommit, + CurrentEpochSetup: u.state.CurrentEpochSetup, + CurrentEpochCommit: u.state.CurrentEpochCommit, + NextEpochSetup: epochSetup, + NextEpochCommit: u.state.NextEpochCommit, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct epoch state entry: %w", err) } - u.state.NextEpochSetup = epochSetup // subsequent epoch commit event and update identities afterwards. 
err = u.ejector.TrackDynamicIdentityList(u.state.NextEpoch.ActiveIdentities) @@ -156,8 +188,44 @@ func (u *HappyPathStateMachine) ProcessEpochCommit(epochCommit *flow.EpochCommit return false, fmt.Errorf("invalid epoch commit event for epoch %d: %w", epochCommit.Counter, err) } - u.state.NextEpoch.CommitID = epochCommit.ID() - u.state.NextEpochCommit = epochCommit + nextEpoch, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: u.state.NextEpoch.SetupID, + CommitID: epochCommit.ID(), + ActiveIdentities: u.state.NextEpoch.ActiveIdentities, + EpochExtensions: u.state.NextEpoch.EpochExtensions, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct next epoch state: %w", err) + } + + newMinEpochStateEntry, err := flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: u.state.PreviousEpoch, + CurrentEpoch: u.state.CurrentEpoch, + NextEpoch: nextEpoch, + EpochFallbackTriggered: u.state.EpochFallbackTriggered, + }, + ) + if err != nil { + return false, fmt.Errorf("could not create min epoch state: %w", err) + } + + u.state, err = flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: newMinEpochStateEntry, + PreviousEpochSetup: u.state.PreviousEpochSetup, + PreviousEpochCommit: u.state.PreviousEpochCommit, + CurrentEpochSetup: u.state.CurrentEpochSetup, + CurrentEpochCommit: u.state.CurrentEpochCommit, + NextEpochSetup: u.state.NextEpochSetup, + NextEpochCommit: epochCommit, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct epoch state entry: %w", err) + } u.telemetry.OnServiceEventProcessed(epochCommit.ServiceEvent()) return true, nil } diff --git a/state/protocol/protocol_state/epochs/statemachine.go b/state/protocol/protocol_state/epochs/statemachine.go index 967057ade4b..8ce1804d5df 100644 --- a/state/protocol/protocol_state/epochs/statemachine.go +++ b/state/protocol/protocol_state/epochs/statemachine.go @@ -3,14 +3,15 @@ package epochs import ( "fmt" + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/protocol_state" "github.com/onflow/flow-go/state/protocol/protocol_state/common" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" ) // StateMachine implements a low-level interface for state-changing operations on the Epoch state. @@ -166,7 +167,7 @@ type EpochStateMachine struct { setups storage.EpochSetups commits storage.EpochCommits epochProtocolStateDB storage.EpochProtocolStateEntries - pendingDbUpdates *transaction.DeferredBlockPersist + pendingDBUpdates *deferred.DeferredBlockPersist } var _ protocol_state.KeyValueStoreStateMachine = (*EpochStateMachine)(nil) @@ -226,7 +227,7 @@ func NewEpochStateMachine( setups: setups, commits: commits, epochProtocolStateDB: epochProtocolStateDB, - pendingDbUpdates: transaction.NewDeferredBlockPersist(), + pendingDBUpdates: deferred.NewDeferredBlockPersist(), }, nil } @@ -237,18 +238,22 @@ func NewEpochStateMachine( // but the actual epoch state is stored separately, nevertheless, the epoch state ID is used to sanity check if the // epoch state is consistent with the KV Store. 
Using this approach, we commit the epoch sub-state to the KV Store which in // affects the Dynamic Protocol State ID which is essentially hash of the KV Store. -func (e *EpochStateMachine) Build() (*transaction.DeferredBlockPersist, error) { +// TODO: update comments +func (e *EpochStateMachine) Build() (*deferred.DeferredBlockPersist, error) { updatedEpochState, updatedStateID, hasChanges := e.activeStateMachine.Build() - e.pendingDbUpdates.AddIndexingOp(func(blockID flow.Identifier, tx *transaction.Tx) error { - return e.epochProtocolStateDB.Index(blockID, updatedStateID)(tx) + + e.pendingDBUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.epochProtocolStateDB.BatchIndex(rw, blockID, updatedStateID) }) + if hasChanges { - e.pendingDbUpdates.AddDbOp(operation.SkipDuplicatesTx( - e.epochProtocolStateDB.StoreTx(updatedStateID, updatedEpochState.MinEpochStateEntry))) + e.pendingDBUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.epochProtocolStateDB.BatchStore(rw.Writer(), updatedStateID, updatedEpochState.MinEpochStateEntry) + }) } e.EvolvingState.SetEpochStateID(updatedStateID) - return e.pendingDbUpdates, nil + return e.pendingDBUpdates, nil } // EvolveState applies the state change(s) on the Epoch sub-state, based on information from the candidate block @@ -295,7 +300,8 @@ func (e *EpochStateMachine) EvolveState(sealedServiceEvents []flow.ServiceEvent) return irrecoverable.NewExceptionf("could not apply service events from ordered results: %w", err) } } - e.pendingDbUpdates.AddIndexingOps(dbUpdates.Pending()) + + e.pendingDBUpdates.Chain(dbUpdates) return nil } @@ -308,7 +314,7 @@ func (e *EpochStateMachine) EvolveState(sealedServiceEvents []flow.ServiceEvent) // it returns the deferred DB updates to be applied to the storage. // Expected errors during normal operations: // - `protocol.InvalidServiceEventError` if any service event is invalid or is not a valid state transition for the current protocol state -func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow.ServiceEvent) (*transaction.DeferredBlockPersist, error) { +func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow.ServiceEvent) (*deferred.DeferredBlockPersist, error) { parentProtocolState := e.activeStateMachine.ParentState() // STEP 1: transition to next epoch if next epoch is committed *and* we are at first block of epoch @@ -321,7 +327,7 @@ func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow. } // STEP 2: apply service events (input events already required to be ordered by block height). - dbUpdates := transaction.NewDeferredBlockPersist() + dbUpdates := deferred.NewDeferredBlockPersist() for _, event := range sealedServiceEvents { switch ev := event.Event.(type) { case *flow.EpochSetup: @@ -330,7 +336,9 @@ func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow. return nil, fmt.Errorf("could not process epoch setup event: %w", err) } if processed { - dbUpdates.AddDbOp(e.setups.StoreTx(ev)) // we'll insert the setup event when we insert the block + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.setups.BatchStore(rw, ev) // we'll insert the setup event when we insert the block + }) } case *flow.EpochCommit: @@ -339,7 +347,9 @@ func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow. 
return nil, fmt.Errorf("could not process epoch commit event: %w", err) } if processed { - dbUpdates.AddDbOp(e.commits.StoreTx(ev)) // we'll insert the commit event when we insert the block + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.commits.BatchStore(rw, ev) // we'll insert the commit event when we insert the block + }) } case *flow.EpochRecover: processed, err := e.activeStateMachine.ProcessEpochRecover(ev) @@ -347,7 +357,13 @@ func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow. return nil, fmt.Errorf("could not process epoch recover event: %w", err) } if processed { - dbUpdates.AddDbOps(e.setups.StoreTx(&ev.EpochSetup), e.commits.StoreTx(&ev.EpochCommit)) // we'll insert the setup & commit events when we insert the block + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + err := e.setups.BatchStore(rw, &ev.EpochSetup) + if err != nil { + return err + } + return e.commits.BatchStore(rw, &ev.EpochCommit) // we'll insert the setup & commit events when we insert the block + }) } case *flow.EjectNode: _ = e.activeStateMachine.EjectIdentity(ev) diff --git a/state/protocol/protocol_state/epochs/statemachine_test.go b/state/protocol/protocol_state/epochs/statemachine_test.go index 35ecdd372ef..a82dae8bb63 100644 --- a/state/protocol/protocol_state/epochs/statemachine_test.go +++ b/state/protocol/protocol_state/epochs/statemachine_test.go @@ -16,7 +16,6 @@ import ( "github.com/onflow/flow-go/state/protocol/protocol_state/epochs" "github.com/onflow/flow-go/state/protocol/protocol_state/epochs/mock" protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" - "github.com/onflow/flow-go/storage/badger/transaction" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -96,17 +95,19 @@ func (s *EpochStateMachineSuite) TestBuild_NoChanges() { err := s.stateMachine.EvolveState(nil) require.NoError(s.T(), err) - indexTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - indexTxDeferredUpdate.On("Execute", mocks.Anything).Return(nil).Once() + rw := storagemock.NewReaderBatchWriter(s.T()) - s.epochStateDB.On("Index", s.candidate.ID(), s.parentEpochState.ID()).Return(indexTxDeferredUpdate.Execute, nil).Once() + s.epochStateDB.On("BatchIndex", rw, s.candidate.ID(), s.parentEpochState.ID()).Return(nil).Once() s.mutator.On("SetEpochStateID", s.parentEpochState.ID()).Return(nil).Once() dbUpdates, err := s.stateMachine.Build() require.NoError(s.T(), err) - // Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby, - // the expected mock methods should be called, which is asserted by the testify framework - err = dbUpdates.Pending().WithBlock(s.candidate.ID())(&transaction.Tx{}) + + // Provide the blockID and execute the resulting `dbUpdates`. Thereby, the expected mock methods should be called, + // which is asserted by the testify framework. Passing nil lockctx proof because no operations require lock; + // operations are deferred only because block ID is not known yet. 
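The `Build` and test changes above queue database writes as operations on a `DeferredBlockPersist`, where each operation only receives the candidate block's ID and the write batch at execution time. The following self-contained sketch illustrates that deferral pattern; the types are simplified stand-ins and do not reproduce the actual `storage/deferred` package.
```go
package main

import "fmt"

// batch stands in for a write batch (storage.ReaderBatchWriter in the diff);
// here it is just a map so the sketch stays self-contained.
type batch map[string]string

// deferredOp only learns the candidate block's ID and the batch when executed.
type deferredOp func(blockID string, b batch) error

// deferredPersist queues operations while the block ID is still unknown,
// loosely mirroring how DeferredBlockPersist is used above.
type deferredPersist struct {
	ops []deferredOp
}

func (d *deferredPersist) AddNextOperation(op deferredOp) { d.ops = append(d.ops, op) }

// Chain appends all operations from another queue, preserving order.
func (d *deferredPersist) Chain(other *deferredPersist) { d.ops = append(d.ops, other.ops...) }

// Execute runs every queued operation once the block ID is finally known.
func (d *deferredPersist) Execute(blockID string, b batch) error {
	for _, op := range d.ops {
		if err := op(blockID, b); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	pending := &deferredPersist{}
	stateID := "state-42"

	// Queued while building the candidate block, before its ID exists.
	pending.AddNextOperation(func(blockID string, b batch) error {
		b["index/"+blockID] = stateID // e.g. index blockID -> stateID
		return nil
	})

	// Later, when the block ID is known, all operations run against one batch.
	b := batch{}
	if err := pending.Execute("block-7", b); err != nil {
		fmt.Println("deferred persist failed:", err)
		return
	}
	fmt.Println(b)
}
```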
+ blockID := s.candidate.ID() + err = dbUpdates.Execute(nil, blockID, rw) require.NoError(s.T(), err) } @@ -126,34 +127,31 @@ func (s *EpochStateMachineSuite) TestBuild_HappyPath() { s.happyPathStateMachine.On("ProcessEpochSetup", epochSetup).Return(true, nil).Once() s.happyPathStateMachine.On("ProcessEpochCommit", epochCommit).Return(true, nil).Once() + w := storagemock.NewWriter(s.T()) + rw := storagemock.NewReaderBatchWriter(s.T()) + rw.On("Writer").Return(w).Once() // called by epochStateDB.BatchStore // prepare a DB update for epoch setup - storeEpochSetupTx := storagemock.NewDeferredDBUpdate(s.T()) - storeEpochSetupTx.On("Execute", mocks.Anything).Return(nil).Once() - s.setupsDB.On("StoreTx", epochSetup).Return(storeEpochSetupTx.Execute, nil).Once() + s.setupsDB.On("BatchStore", rw, epochSetup).Return(nil).Once() // prepare a DB update for epoch commit - storeEpochCommitTx := storagemock.NewDeferredDBUpdate(s.T()) - storeEpochCommitTx.On("Execute", mocks.Anything).Return(nil).Once() - s.commitsDB.On("StoreTx", epochCommit).Return(storeEpochCommitTx.Execute, nil).Once() + s.commitsDB.On("BatchStore", rw, epochCommit).Return(nil).Once() err := s.stateMachine.EvolveState([]flow.ServiceEvent{epochSetup.ServiceEvent(), epochCommit.ServiceEvent()}) require.NoError(s.T(), err) // prepare a DB update for epoch state - indexTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - indexTxDeferredUpdate.On("Execute", mocks.Anything).Return(nil).Once() - storeTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - storeTxDeferredUpdate.On("Execute", mocks.Anything).Return(nil).Once() - - s.epochStateDB.On("Index", s.candidate.ID(), updatedStateID).Return(indexTxDeferredUpdate.Execute, nil).Once() - s.epochStateDB.On("StoreTx", updatedStateID, updatedState.MinEpochStateEntry).Return(storeTxDeferredUpdate.Execute, nil).Once() + s.epochStateDB.On("BatchIndex", rw, s.candidate.ID(), updatedStateID).Return(nil).Once() + s.epochStateDB.On("BatchStore", w, updatedStateID, updatedState.MinEpochStateEntry).Return(nil).Once() s.mutator.On("SetEpochStateID", updatedStateID).Return(nil).Once() dbUpdates, err := s.stateMachine.Build() require.NoError(s.T(), err) - // Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby, - // the expected mock methods should be called, which is asserted by the testify framework - err = dbUpdates.Pending().WithBlock(s.candidate.ID())(&transaction.Tx{}) + + // Provide the blockID and execute the resulting `dbUpdates`. Thereby, the expected mock methods should be called, + // which is asserted by the testify framework. Passing nil lockctx proof because no operations require lock; + // operations are deferred only because block ID is not known yet. 
+ blockID := s.candidate.ID() + err = dbUpdates.Execute(nil, blockID, rw) require.NoError(s.T(), err) } @@ -534,9 +532,7 @@ func (s *EpochStateMachineSuite) TestEvolveStateTransitionToNextEpoch_WithInvali err = stateMachine.EvolveState([]flow.ServiceEvent{invalidServiceEvent.ServiceEvent()}) require.NoError(s.T(), err) - indexTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - indexTxDeferredUpdate.On("Execute", mocks.Anything).Return(nil).Once() - s.epochStateDB.On("Index", s.candidate.ID(), mocks.Anything).Return(indexTxDeferredUpdate.Execute, nil).Once() + s.epochStateDB.On("BatchIndex", mocks.Anything, s.candidate.ID(), mocks.Anything).Return(nil).Once() expectedEpochState := &flow.MinEpochStateEntry{ PreviousEpoch: s.parentEpochState.CurrentEpoch.Copy(), @@ -545,15 +541,20 @@ func (s *EpochStateMachineSuite) TestEvolveStateTransitionToNextEpoch_WithInvali EpochFallbackTriggered: true, } - storeTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - storeTxDeferredUpdate.On("Execute", mocks.Anything).Return(nil).Once() - s.epochStateDB.On("StoreTx", expectedEpochState.ID(), expectedEpochState).Return(storeTxDeferredUpdate.Execute, nil).Once() + s.epochStateDB.On("BatchStore", mocks.Anything, expectedEpochState.ID(), expectedEpochState).Return(nil).Once() s.mutator.On("SetEpochStateID", expectedEpochState.ID()).Return().Once() dbOps, err := stateMachine.Build() require.NoError(s.T(), err) - // Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby, - // the expected mock methods should be called, which is asserted by the testify framework - err = dbOps.Pending().WithBlock(s.candidate.ID())(&transaction.Tx{}) + + w := storagemock.NewWriter(s.T()) + rw := storagemock.NewReaderBatchWriter(s.T()) + rw.On("Writer").Return(w).Once() // called by epochStateDB.BatchStore + + // Provide the blockID and execute the resulting `dbUpdates`. Thereby, the expected mock methods should be called, + // which is asserted by the testify framework. Passing nil lockctx proof because no operations require lock; + // operations are deferred only because block ID is not known yet + blockID := s.candidate.ID() + err = dbOps.Execute(nil, blockID, rw) require.NoError(s.T(), err) } diff --git a/state/protocol/protocol_state/kvstore.go b/state/protocol/protocol_state/kvstore.go index 7a8d784c752..592870e1894 100644 --- a/state/protocol/protocol_state/kvstore.go +++ b/state/protocol/protocol_state/kvstore.go @@ -3,7 +3,7 @@ package protocol_state import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" ) // This file contains versioned read-write interfaces to the Protocol State's @@ -103,7 +103,7 @@ type OrthogonalStoreStateMachine[P any] interface { // Deferred updates must be applied in a transaction to ensure atomicity. // // No errors are expected during normal operations. - Build() (*transaction.DeferredBlockPersist, error) + Build() (*deferred.DeferredBlockPersist, error) // EvolveState applies the state change(s) on sub-state P for the candidate block (under construction). 
// Information that potentially changes the Epoch state (compared to the parent block's state): diff --git a/state/protocol/protocol_state/kvstore/kvstore_storage.go b/state/protocol/protocol_state/kvstore/kvstore_storage.go index dfb98c02cc3..b5613d6ba39 100644 --- a/state/protocol/protocol_state/kvstore/kvstore_storage.go +++ b/state/protocol/protocol_state/kvstore/kvstore_storage.go @@ -3,11 +3,12 @@ package kvstore import ( "fmt" + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/protocol_state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/transaction" ) // ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. Here, we augment @@ -31,19 +32,21 @@ func NewProtocolKVStore(protocolStateSnapshots storage.ProtocolKVStore) *Protoco } } -// StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), which persists -// the given KV-store snapshot as part of a DB tx. Per convention, all implementations of `protocol.KVStoreReader` -// must support encoding their state into a version and data blob. -// Expected errors of the returned anonymous function: -// - storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored. -func (p *ProtocolKVStore) StoreTx(stateID flow.Identifier, kvStore protocol.KVStoreReader) func(*transaction.Tx) error { +// BatchStore adds the KV-store snapshot in the database using the given ID as key. Per convention, all +// implementations of [protocol.KVStoreReader] should be able to successfully encode their state into a +// data blob. If the encoding fails, an error is returned. +// BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). +// Here, the ID is expected to be a collision-resistant hash of the snapshot (including the +// ProtocolStateVersion). Hence, for the same ID (key), BatchStore will reject changing the data (value). 
+// +// Expected errors during normal operations: +// - storage.ErrDataMismatch if a _different_ KV store for the given stateID has already been persisted +func (p *ProtocolKVStore) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error { version, data, err := kvStore.VersionedEncode() if err != nil { - return func(*transaction.Tx) error { - return fmt.Errorf("failed to VersionedEncode protocol state: %w", err) - } + return fmt.Errorf("failed to VersionedEncode protocol state: %w", err) } - return p.ProtocolKVStore.StoreTx(stateID, &flow.PSKeyValueStoreData{ + return p.ProtocolKVStore.BatchStore(lctx, rw, stateID, &flow.PSKeyValueStoreData{ Version: version, Data: data, }) diff --git a/state/protocol/protocol_state/kvstore/kvstore_storage_test.go b/state/protocol/protocol_state/kvstore/kvstore_storage_test.go index 98f7dd9176e..d1c59283079 100644 --- a/state/protocol/protocol_state/kvstore/kvstore_storage_test.go +++ b/state/protocol/protocol_state/kvstore/kvstore_storage_test.go @@ -5,13 +5,12 @@ import ( "math" "testing" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -29,6 +28,7 @@ func TestProtocolKVStore_StoreTx(t *testing.T) { // On the happy path, where the input `kvState` encodes its state successfully, the wrapped store // should be called to persist the version-encoded snapshot. t.Run("happy path", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() expectedVersion := uint64(13) encData := unittest.RandomBytes(117) versionedSnapshot := &flow.PSKeyValueStoreData{ @@ -37,27 +37,33 @@ func TestProtocolKVStore_StoreTx(t *testing.T) { } kvState.On("VersionedEncode").Return(expectedVersion, encData, nil).Once() - deferredUpdate := storagemock.NewDeferredDBUpdate(t) - deferredUpdate.On("Execute", mock.Anything).Return(nil).Once() - llStorage.On("StoreTx", kvStateID, versionedSnapshot).Return(deferredUpdate.Execute).Once() + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + rw := storagemock.NewReaderBatchWriter(t) + llStorage.On("BatchStore", lctx, rw, kvStateID, versionedSnapshot).Return(nil).Once() - // Calling `StoreTx` should return the output of the wrapped low-level storage, which is a deferred database + // TODO: potentially update - we might be bringing back a functor here, because we acquire a lock as explained in slack thread https://flow-foundation.slack.com/archives/C071612SJJE/p1754600182033289?thread_ts=1752912083.194619&cid=C071612SJJE + // Calling `BatchStore` should return the output of the wrapped low-level storage, which is a deferred database // update. Conceptually, it is possible that `ProtocolKVStore` wraps the deferred database operation in faulty // code, such that it cannot be executed. Therefore, we execute the top-level deferred database update below // and verify that the deferred database operation returned by the lower-level is actually reached. 
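The `BatchStore` contract described above is idempotent per key and rejects conflicting data for an existing key with a mismatch error. The sketch below illustrates that behaviour with a toy in-memory store; `errDataMismatch` stands in for `storage.ErrDataMismatch`, and the remaining names are illustrative, not the flow-go API.
```go
package main

import (
	"errors"
	"fmt"
)

// errDataMismatch plays the role of storage.ErrDataMismatch: the same key may
// be written repeatedly, but only with identical data.
var errDataMismatch = errors.New("existing data for key differs from new data")

type idempotentStore struct {
	data map[string][]byte
}

// store is a no-op for repeated identical writes and rejects conflicting ones.
// When keys are collision-resistant hashes of the value, a mismatch can only
// mean a bug or corruption, which is why callers above escalate it.
func (s *idempotentStore) store(key string, value []byte) error {
	if existing, ok := s.data[key]; ok {
		if string(existing) == string(value) {
			return nil // idempotent repeat
		}
		return fmt.Errorf("key %q: %w", key, errDataMismatch)
	}
	s.data[key] = value
	return nil
}

func main() {
	s := &idempotentStore{data: map[string][]byte{}}
	fmt.Println(s.store("stateID-1", []byte("snapshot")))     // <nil>
	fmt.Println(s.store("stateID-1", []byte("snapshot")))     // <nil> (repeat)
	err := s.store("stateID-1", []byte("different snapshot")) // conflicting write
	fmt.Println(errors.Is(err, errDataMismatch))              // true
}
```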
- dbUpdate := store.StoreTx(kvStateID, kvState) - err := dbUpdate(&transaction.Tx{}) - require.NoError(t, err) + require.NoError(t, store.BatchStore(lctx, rw, kvStateID, kvState)) }) // On the unhappy path, i.e. when the encoding of input `kvState` failed, `ProtocolKVStore` should produce // a deferred database update that always returns the encoding error. t.Run("encoding fails", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() encodingError := errors.New("encoding error") + kvState.On("VersionedEncode").Return(uint64(0), nil, encodingError).Once() - dbUpdate := store.StoreTx(kvStateID, kvState) - err := dbUpdate(&transaction.Tx{}) + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + rw := storagemock.NewReaderBatchWriter(t) + err := store.BatchStore(lctx, rw, kvStateID, kvState) require.ErrorIs(t, err, encodingError) }) } @@ -73,29 +79,33 @@ func TestProtocolKVStore_IndexTx(t *testing.T) { // should be called to persist the version-encoded snapshot. t.Run("happy path", func(t *testing.T) { - deferredUpdate := storagemock.NewDeferredDBUpdate(t) - deferredUpdate.On("Execute", mock.Anything).Return(nil).Once() - llStorage.On("IndexTx", blockID, stateID).Return(deferredUpdate.Execute).Once() - - // Calling `IndexTx` should return the output of the wrapped low-level storage, which is a deferred database + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + rw := storagemock.NewReaderBatchWriter(t) + llStorage.On("BatchIndex", lctx, rw, blockID, stateID).Return(nil).Once() + + // TODO: potentially update - we might be bringing back a functor here, because we acquire a lock as explained in slack thread https://flow-foundation.slack.com/archives/C071612SJJE/p1754600182033289?thread_ts=1752912083.194619&cid=C071612SJJE + // Calling `BatchIndex` should return the output of the wrapped low-level storage, which is a deferred database // update. Conceptually, it is possible that `ProtocolKVStore` wraps the deferred database operation in faulty // code, such that it cannot be executed. Therefore, we execute the top-level deferred database update below // and verify that the deferred database operation returned by the lower-level is actually reached. - dbUpdate := store.IndexTx(blockID, stateID) - err := dbUpdate(&transaction.Tx{}) - require.NoError(t, err) + require.NoError(t, store.BatchIndex(lctx, rw, blockID, stateID)) }) // On the unhappy path, the deferred database update from the lower level just errors upon execution. // This error should be escalated. 
t.Run("unhappy path", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) indexingError := errors.New("indexing error") - deferredUpdate := storagemock.NewDeferredDBUpdate(t) - deferredUpdate.On("Execute", mock.Anything).Return(indexingError).Once() - llStorage.On("IndexTx", blockID, stateID).Return(deferredUpdate.Execute).Once() + rw := storagemock.NewReaderBatchWriter(t) + llStorage.On("BatchIndex", lctx, rw, blockID, stateID).Return(indexingError).Once() - dbUpdate := store.IndexTx(blockID, stateID) - err := dbUpdate(&transaction.Tx{}) + err := store.BatchIndex(lctx, rw, blockID, stateID) require.ErrorIs(t, err, indexingError) }) } diff --git a/state/protocol/protocol_state/kvstore_storage.go b/state/protocol/protocol_state/kvstore_storage.go index e0a57f90f47..cee1b7cfc07 100644 --- a/state/protocol/protocol_state/kvstore_storage.go +++ b/state/protocol/protocol_state/kvstore_storage.go @@ -1,9 +1,11 @@ package protocol_state import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage" ) // ProtocolKVStore persists different snapshots of the Protocol State's Key-Calue stores [KV-stores]. @@ -14,19 +16,20 @@ import ( // supported by the current software version. There might be serialized snapshots with legacy versions // in the database that are not supported anymore by this software version. type ProtocolKVStore interface { - // StoreTx returns an anonymous function (intended to be executed as part of a database transaction), - // which persists the given KV-store snapshot as part of a DB tx. Per convention, all implementations - // of `protocol.KVStoreReader` should be able to successfully encode their state into a data blob. - // If the encoding fails, the anonymous function returns an error upon call. + // BatchStore adds the KV-store snapshot in the database using the given ID as key. Per convention, all + // implementations of [protocol.KVStoreReader] should be able to successfully encode their state into a + // data blob. If the encoding fails, an error is returned. + // BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). + // Here, the ID is expected to be a collision-resistant hash of the snapshot (including the + // ProtocolStateVersion). Hence, for the same ID (key), BatchStore will reject changing the data (value). // - // Expected errors of the returned anonymous function: - // - storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored. - StoreTx(stateID flow.Identifier, kvStore protocol.KVStoreReader) func(*transaction.Tx) error + // Expected errors during normal operations: + // - storage.ErrDataMismatch if a _different_ KV store for the given stateID has already been persisted + BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error - // IndexTx returns an anonymous function intended to be executed as part of a database transaction. + // BatchIndex writes the blockID->stateID index to the input write batch. // In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the // block that _proposes_ the updated key-value store. 
- // Upon call, the anonymous function persists the specific map entry in the node's database. // Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated KV store. For example, // the KV store changes if we seal some execution results emitting specific service events. @@ -34,9 +37,10 @@ type ProtocolKVStore interface { // - CAUTION: The updated state requires confirmation by a QC and will only become active at the // child block, _after_ validating the QC. // + // It requires the caller to acquire storage.LockInsertBlock lock // Expected errors of the returned anonymous function: // - storage.ErrAlreadyExists if a KV store for the given blockID has already been indexed. - IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error + BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error // ByID retrieves the KV store snapshot with the given ID. // Expected errors during normal operations: diff --git a/state/protocol/protocol_state/mock/deferred_op.go b/state/protocol/protocol_state/mock/deferred_op.go new file mode 100644 index 00000000000..d7c6ae750d2 --- /dev/null +++ b/state/protocol/protocol_state/mock/deferred_op.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// DeferredOp is an autogenerated mock type for the DeferredOp type +type DeferredOp struct { + mock.Mock +} + +// Execute provides a mock function with given fields: blockID, rw +func (_m *DeferredOp) Execute(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + ret := _m.Called(blockID, rw) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { + r0 = rf(blockID, rw) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewDeferredOp creates a new instance of DeferredOp. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
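The updated interface and tests acquire a lock context, take `storage.LockInsertBlock`, and pass the resulting proof through to `BatchIndex`. The sketch below illustrates that proof-of-lock pattern with a toy lock manager; it does not reproduce the actual `lockctx` API, and the lock name `insert-block` is an illustrative stand-in.
```go
package main

import (
	"fmt"
	"sync"
)

// lockManager is a toy stand-in for the lock manager used in the tests above:
// a caller acquires a named lock and receives a proof that it holds it.
type lockManager struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

// proof witnesses that a particular named lock is currently held.
type proof struct{ name string }

func newLockManager() *lockManager {
	return &lockManager{locks: map[string]*sync.Mutex{}}
}

// acquire blocks until the named lock is held, returning a proof and a release func.
func (m *lockManager) acquire(name string) (proof, func()) {
	m.mu.Lock()
	l, ok := m.locks[name]
	if !ok {
		l = &sync.Mutex{}
		m.locks[name] = l
	}
	m.mu.Unlock()
	l.Lock()
	return proof{name: name}, l.Unlock
}

// batchIndex requires proof that the block-insertion lock is held, mirroring
// the convention that BatchIndex is only called under storage.LockInsertBlock.
func batchIndex(p proof, blockID, stateID string) error {
	if p.name != "insert-block" {
		return fmt.Errorf("batchIndex called without the insert-block lock")
	}
	fmt.Printf("index %s -> %s\n", blockID, stateID)
	return nil
}

func main() {
	m := newLockManager()
	p, release := m.acquire("insert-block")
	defer release()
	if err := batchIndex(p, "block-7", "state-42"); err != nil {
		fmt.Println(err)
	}
}
```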
+func NewDeferredOp(t interface { + mock.TestingT + Cleanup(func()) +}) *DeferredOp { + mock := &DeferredOp{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/key_value_store_state_machine.go b/state/protocol/protocol_state/mock/key_value_store_state_machine.go index 3ff218592af..5fe45b71d11 100644 --- a/state/protocol/protocol_state/mock/key_value_store_state_machine.go +++ b/state/protocol/protocol_state/mock/key_value_store_state_machine.go @@ -4,11 +4,11 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" + deferred "github.com/onflow/flow-go/storage/deferred" + mock "github.com/stretchr/testify/mock" protocol "github.com/onflow/flow-go/state/protocol" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // KeyValueStoreStateMachine is an autogenerated mock type for the KeyValueStoreStateMachine type @@ -17,23 +17,23 @@ type KeyValueStoreStateMachine[P interface{}] struct { } // Build provides a mock function with no fields -func (_m *KeyValueStoreStateMachine[P]) Build() (*transaction.DeferredBlockPersist, error) { +func (_m *KeyValueStoreStateMachine[P]) Build() (*deferred.DeferredBlockPersist, error) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for Build") } - var r0 *transaction.DeferredBlockPersist + var r0 *deferred.DeferredBlockPersist var r1 error - if rf, ok := ret.Get(0).(func() (*transaction.DeferredBlockPersist, error)); ok { + if rf, ok := ret.Get(0).(func() (*deferred.DeferredBlockPersist, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() *transaction.DeferredBlockPersist); ok { + if rf, ok := ret.Get(0).(func() *deferred.DeferredBlockPersist); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*transaction.DeferredBlockPersist) + r0 = ret.Get(0).(*deferred.DeferredBlockPersist) } } diff --git a/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go b/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go index 44aac567909..65b3f35ac98 100644 --- a/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go +++ b/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go @@ -4,9 +4,9 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + deferred "github.com/onflow/flow-go/storage/deferred" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + mock "github.com/stretchr/testify/mock" ) // OrthogonalStoreStateMachine is an autogenerated mock type for the OrthogonalStoreStateMachine type @@ -15,23 +15,23 @@ type OrthogonalStoreStateMachine[P interface{}] struct { } // Build provides a mock function with no fields -func (_m *OrthogonalStoreStateMachine[P]) Build() (*transaction.DeferredBlockPersist, error) { +func (_m *OrthogonalStoreStateMachine[P]) Build() (*deferred.DeferredBlockPersist, error) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for Build") } - var r0 *transaction.DeferredBlockPersist + var r0 *deferred.DeferredBlockPersist var r1 error - if rf, ok := ret.Get(0).(func() (*transaction.DeferredBlockPersist, error)); ok { + if rf, ok := ret.Get(0).(func() (*deferred.DeferredBlockPersist, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() *transaction.DeferredBlockPersist); ok { + if rf, ok := ret.Get(0).(func() *deferred.DeferredBlockPersist); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*transaction.DeferredBlockPersist) + r0 = ret.Get(0).(*deferred.DeferredBlockPersist) } } diff --git a/state/protocol/protocol_state/mock/protocol_kv_store.go b/state/protocol/protocol_state/mock/protocol_kv_store.go index 290a662cc69..ed4de36f193 100644 --- a/state/protocol/protocol_state/mock/protocol_kv_store.go +++ b/state/protocol/protocol_state/mock/protocol_kv_store.go @@ -3,14 +3,16 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" protocol "github.com/onflow/flow-go/state/protocol" protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // ProtocolKVStore is an autogenerated mock type for the ProtocolKVStore type @@ -18,6 +20,42 @@ type ProtocolKVStore struct { mock.Mock } +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, stateID +func (_m *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, stateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: lctx, rw, stateID, kvStore +func (_m *ProtocolKVStore) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error { + ret := _m.Called(lctx, rw, stateID, kvStore) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, protocol.KVStoreReader) error); ok { + r0 = rf(lctx, rw, stateID, kvStore) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: blockID func (_m *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (protocol_state.KVStoreAPI, error) { ret := _m.Called(blockID) @@ -78,46 +116,6 @@ func (_m *ProtocolKVStore) ByID(id flow.Identifier) (protocol_state.KVStoreAPI, return r0, r1 } -// IndexTx provides a mock function with given fields: blockID, stateID -func (_m *ProtocolKVStore) IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error { - ret := _m.Called(blockID, stateID) - - if len(ret) == 0 { - panic("no return value specified for IndexTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) func(*transaction.Tx) error); ok { - r0 = rf(blockID, stateID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -// StoreTx provides a mock function with given fields: stateID, kvStore -func (_m *ProtocolKVStore) StoreTx(stateID flow.Identifier, kvStore protocol.KVStoreReader) func(*transaction.Tx) error { - ret := _m.Called(stateID, kvStore) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, protocol.KVStoreReader) func(*transaction.Tx) error); ok { - r0 = rf(stateID, kvStore) - } else { - if ret.Get(0) != nil { - r0 = 
ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewProtocolKVStore creates a new instance of ProtocolKVStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewProtocolKVStore(t interface { diff --git a/state/protocol/protocol_state/state/mutable_protocol_state_test.go b/state/protocol/protocol_state/state/mutable_protocol_state_test.go index ebb914d499b..f552c5ab7ab 100644 --- a/state/protocol/protocol_state/state/mutable_protocol_state_test.go +++ b/state/protocol/protocol_state/state/mutable_protocol_state_test.go @@ -10,12 +10,13 @@ import ( "golang.org/x/exp/slices" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" psmock "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/state/protocol/protocol_state" protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -102,25 +103,24 @@ func (s *StateMutatorSuite) SetupTest() { // actually called when executing the returned `DeferredBlockPersist` func (s *StateMutatorSuite) testEvolveState(seals []*flow.Seal, expectedResultingStateID flow.Identifier, stateChangeExpected bool) { // on the happy path, we _always_ require a deferred db update, which indexes the protocol state by the candidate block's ID - indexTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) - indexTxDeferredUpdate.On("Execute", mock.Anything).Return(nil).Once() - s.protocolKVStoreDB.On("IndexTx", s.candidate.ID(), expectedResultingStateID).Return(indexTxDeferredUpdate.Execute).Once() + rw := storagemock.NewReaderBatchWriter(s.T()) + s.protocolKVStoreDB.On("BatchIndex", mock.Anything, rw, s.candidate.ID(), expectedResultingStateID).Return(nil).Once() // expect calls to prepare a deferred update for indexing and storing the resulting state: // as state has not changed, we expect the parent blocks protocol state ID - storeTxDeferredUpdate := storagemock.NewDeferredDBUpdate(s.T()) if stateChangeExpected { - storeTxDeferredUpdate.On("Execute", mock.Anything).Return(nil).Once() - s.protocolKVStoreDB.On("StoreTx", expectedResultingStateID, &s.evolvingState).Return(storeTxDeferredUpdate.Execute).Once() + s.protocolKVStoreDB.On("BatchStore", mock.Anything, rw, expectedResultingStateID, &s.evolvingState).Return(nil).Once() } - resultingStateID, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, seals) + deferredDBOps := deferred.NewDeferredBlockPersist() + resultingStateID, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, seals) require.NoError(s.T(), err) require.Equal(s.T(), expectedResultingStateID, resultingStateID) // Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby, // the expected mock methods should be called, which is asserted by the testify framework - err = dbUpdates.Pending().WithBlock(s.candidate.ID())(&transaction.Tx{}) + blockID := s.candidate.ID() + err = deferredDBOps.Execute(nil, blockID, rw) require.NoError(s.T(), err) // The testify framework calls `AssertExpectations` on all mocks when the test finishes. 
However, note that we are calling @@ -129,8 +129,7 @@ func (s *StateMutatorSuite) testEvolveState(seals []*flow.Seal, expectedResultin // in the sub-test where we expect them to. To avoid any problems, we call `AssertExpectations` below to enforce the expected // mock calls happened that `testEvolveState` added. s.protocolKVStoreDB.AssertExpectations(s.T()) - indexTxDeferredUpdate.AssertExpectations(s.T()) - storeTxDeferredUpdate.AssertExpectations(s.T()) + rw.AssertExpectations(s.T()) } // Test_HappyPath_StateInvariant tests that `MutableProtocolState.EvolveState` returns all updates from sub-state state machines and @@ -400,10 +399,11 @@ func (s *StateMutatorSuite) Test_InvalidParent() { unknownParent := unittest.IdentifierFixture() s.protocolKVStoreDB.On("ByBlockID", unknownParent).Return(nil, storage.ErrNotFound) - _, dbUpdates, err := s.mutableState.EvolveState(unknownParent, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, unknownParent, s.candidate.View, []*flow.Seal{}) require.Error(s.T(), err) require.False(s.T(), protocol.IsInvalidServiceEventError(err)) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) } // Test_ReplicateFails verifies that errors during the parent state replication are escalated to the caller. @@ -420,9 +420,10 @@ func (s *StateMutatorSuite) Test_ReplicateFails() { s.kvStateMachineFactories[0] = *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T()) s.kvStateMachineFactories[1] = *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T()) - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) } // Test_StateMachineFactoryFails verifies that errors received while creating the sub-state machines are escalated to the caller. 
@@ -446,17 +447,20 @@ func (s *StateMutatorSuite) Test_StateMachineFactoryFails() { s.Run("failing factory is last", func() { s.kvStateMachineFactories[0], s.kvStateMachineFactories[1] = workingFactory, failingFactory //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) }) failingFactory.On("Create", s.candidate.View, s.candidate.ParentID, &s.parentState, &s.evolvingState).Return(nil, exception).Once() s.Run("failing factory is first", func() { s.kvStateMachineFactories[0], s.kvStateMachineFactories[1] = failingFactory, workingFactory //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) }) } @@ -483,19 +487,21 @@ func (s *StateMutatorSuite) Test_StateMachineProcessingServiceEventsFails() { s.Run("failing state machine is last", func() { s.kvStateMachines[0], s.kvStateMachines[1] = workingStateMachine, failingStateMachine //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) require.False(s.T(), protocol.IsInvalidServiceEventError(err)) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) }) failingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(exception).Once() s.Run("failing state machine is first", func() { s.kvStateMachines[0], s.kvStateMachines[1] = failingStateMachine, workingStateMachine //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) require.False(s.T(), protocol.IsInvalidServiceEventError(err)) - require.True(s.T(), dbUpdates.IsEmpty()) + require.True(s.T(), deferredDBOps.IsEmpty()) }) } @@ -505,7 +511,7 @@ func (s *StateMutatorSuite) Test_StateMachineProcessingServiceEventsFails() { func (s *StateMutatorSuite) Test_StateMachineBuildFails() { workingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T()) workingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(nil).Twice() - workingStateMachine.On("Build").Return(transaction.NewDeferredBlockPersist(), nil).Maybe() + workingStateMachine.On("Build").Return(deferred.NewDeferredBlockPersist(), nil).Maybe() exception := errors.New("exception") failingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T()) @@ -514,19 +520,21 @@ func (s *StateMutatorSuite) Test_StateMachineBuildFails() { 
s.Run("failing state machine is last", func() { s.kvStateMachines[0], s.kvStateMachines[1] = workingStateMachine, failingStateMachine //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) require.False(s.T(), protocol.IsInvalidServiceEventError(err)) - require.True(s.T(), dbUpdates.IsEmpty()) + require.Nil(s.T(), deferredDBOps.Execute(nil, flow.ZeroID, nil)) }) failingStateMachine.On("Build").Return(nil, exception).Once() s.Run("failing state machine is first", func() { s.kvStateMachines[0], s.kvStateMachines[1] = failingStateMachine, workingStateMachine //nolint:govet - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.ErrorIs(s.T(), err, exception) require.False(s.T(), protocol.IsInvalidServiceEventError(err)) - require.True(s.T(), dbUpdates.IsEmpty()) + require.Nil(s.T(), deferredDBOps.Execute(nil, flow.ZeroID, nil)) }) } @@ -543,16 +551,26 @@ func (s *StateMutatorSuite) Test_EncodeFailed() { s.kvStateMachines[0] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).DuringEvolveState(modifyState).Mock() s.kvStateMachines[1] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock() - s.protocolKVStoreDB.On("IndexTx", s.candidate.ID(), expectedResultingStateID).Return(func(*transaction.Tx) error { return nil }).Once() - s.protocolKVStoreDB.On("StoreTx", expectedResultingStateID, &s.evolvingState).Return(func(*transaction.Tx) error { return exception }).Once() + rw := storagemock.NewReaderBatchWriter(s.T()) + s.protocolKVStoreDB.On("BatchIndex", mock.Anything, mock.Anything, s.candidate.ID(), expectedResultingStateID).Return(nil).Once() + s.protocolKVStoreDB.On("BatchStore", mock.Anything, mock.Anything, expectedResultingStateID, &s.evolvingState).Return(exception).Once() - _, dbUpdates, err := s.mutableState.EvolveState(s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) + deferredDBOps := deferred.NewDeferredBlockPersist() + _, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{}) require.NoError(s.T(), err) // `EvolveState` should succeed, because storing the encoded snapshot only happens when we execute dbUpdates // Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby, // the expected mock methods should be called, which is asserted by the testify framework - err = dbUpdates.Pending().WithBlock(s.candidate.ID())(&transaction.Tx{}) - require.ErrorIs(s.T(), err, exception) + blockID := s.candidate.ID() + err = deferredDBOps.Execute(nil, blockID, rw) + + // We expect the business logic to wrap the unexpected `exception` from above into an irrecoverable error. + // Therefore, we should _not_ be able to unwrap the returned error to match the original `exception`. + // Furthermore, the business logic should _not_ erroneously interpret the error as an invalid service event error. 
+ irrecErr := irrecoverable.NewExceptionf("") + require.ErrorAs(s.T(), err, &irrecErr) + require.NotErrorIs(s.T(), err, exception) + require.False(s.T(), protocol.IsInvalidServiceEventError(err)) s.protocolKVStoreDB.AssertExpectations(s.T()) } @@ -629,11 +647,8 @@ func (m *mockStateTransition) Mock() protocol_statemock.OrthogonalStoreStateMach } }).Return(nil).Once() - deferredUpdate := storagemock.NewDeferredDBUpdate(m.T) - deferredUpdate.On("Execute", mock.Anything).Return(nil).Once() - deferredDBUpdates := transaction.NewDeferredBlockPersist().AddDbOp(deferredUpdate.Execute) stateMachine.On("Build").Run(func(args mock.Arguments) { require.True(m.T, evolveStateCalled, "Method `OrthogonalStoreStateMachine.Build` called before `EvolveState`!") - }).Return(deferredDBUpdates, nil).Once() + }).Return(deferred.NewDeferredBlockPersist(), nil).Once() return *stateMachine //nolint:govet } diff --git a/state/protocol/protocol_state/state/protocol_state.go b/state/protocol/protocol_state/state/protocol_state.go index 9316942f4d3..b7f7d3bdb0c 100644 --- a/state/protocol/protocol_state/state/protocol_state.go +++ b/state/protocol/protocol_state/state/protocol_state.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -15,8 +16,7 @@ import ( "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/state/protocol/protocol_state/pubsub" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" ) // ProtocolState is an implementation of the read-only interface for protocol state, it allows querying information @@ -57,7 +57,12 @@ func (s *ProtocolState) EpochStateAtBlockID(blockID flow.Identifier) (protocol.E if err != nil { return nil, fmt.Errorf("could not query epoch protocol state at block (%x): %w", blockID, err) } - return inmem.NewEpochProtocolStateAdapter(protocolStateEntry, s.globalParams), nil + return inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: protocolStateEntry, + Params: s.globalParams, + }, + ) } // KVStoreAtBlockID returns protocol state at block ID. @@ -196,25 +201,26 @@ func newMutableProtocolState( // in the node software or state corruption, i.e. case (b). This is the only scenario where the error return // of this function is not nil. If such an exception is returned, continuing is not an option. 
func (s *MutableProtocolState) EvolveState( + deferredDBOps *deferred.DeferredBlockPersist, parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal, -) (flow.Identifier, *transaction.DeferredBlockPersist, error) { +) (flow.Identifier, error) { serviceEvents, err := s.serviceEventsFromSeals(candidateSeals) if err != nil { - return flow.ZeroID, nil, fmt.Errorf("extracting service events from candidate seals failed: %w", err) + return flow.ZeroID, fmt.Errorf("extracting service events from candidate seals failed: %w", err) } parentStateID, stateMachines, evolvingState, err := s.initializeOrthogonalStateMachines(parentBlockID, candidateView) if err != nil { - return flow.ZeroID, nil, fmt.Errorf("failure initializing sub-state machines for evolving the Protocol State: %w", err) + return flow.ZeroID, fmt.Errorf("failure initializing sub-state machines for evolving the Protocol State: %w", err) } - resultingStateID, dbUpdates, err := s.build(parentStateID, stateMachines, serviceEvents, evolvingState) + resultingStateID, err := s.build(deferredDBOps, parentStateID, stateMachines, serviceEvents, evolvingState) if err != nil { - return flow.ZeroID, nil, fmt.Errorf("evolving and building the resulting Protocol State failed: %w", err) + return flow.ZeroID, fmt.Errorf("evolving and building the resulting Protocol State failed: %w", err) } - return resultingStateID, dbUpdates, nil + return resultingStateID, nil } // initializeOrthogonalStateMachines instantiates the sub-state machines that in aggregate evolve the protocol state. @@ -312,38 +318,49 @@ func (s *MutableProtocolState) serviceEventsFromSeals(candidateSeals []*flow.Sea // on the candidate block's ID, which is still unknown at the time of block construction. // - err: All error returns indicate potential state corruption and should therefore be treated as fatal. func (s *MutableProtocolState) build( + deferredDBOps *deferred.DeferredBlockPersist, parentStateID flow.Identifier, stateMachines []protocol_state.KeyValueStoreStateMachine, serviceEvents []flow.ServiceEvent, evolvingState protocol.KVStoreReader, -) (flow.Identifier, *transaction.DeferredBlockPersist, error) { +) (flow.Identifier, error) { for _, stateMachine := range stateMachines { err := stateMachine.EvolveState(serviceEvents) // state machine should only bubble up exceptions if err != nil { - return flow.ZeroID, nil, fmt.Errorf("exception from sub-state machine during state evolution: %w", err) + return flow.ZeroID, fmt.Errorf("exception from sub-state machine during state evolution: %w", err) } } // _after_ all state machines have ingested the available information, we build the resulting overall state - dbUpdates := transaction.NewDeferredBlockPersist() for _, stateMachine := range stateMachines { dbOps, err := stateMachine.Build() if err != nil { - return flow.ZeroID, nil, fmt.Errorf("unexpected exception from sub-state machine while building its output state: %w", err) + return flow.ZeroID, fmt.Errorf("unexpected exception from sub-state machine while building its output state: %w", err) } - dbUpdates.AddIndexingOps(dbOps.Pending()) + deferredDBOps.Chain(dbOps) } resultingStateID := evolvingState.ID() // We _always_ index the protocol state by the candidate block's ID. But only if the // state actually changed, we add a database operation to persist it. 
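In the deferred store operation below, the only expected error from `BatchStore` is a data mismatch, which in this caller's context signals a bug or state corruption and is therefore elevated via `irrecoverable.NewExceptionf` rather than handled. The self-contained sketch that follows illustrates that elevation; `asException`, `storeState`, and the other names are illustrative placeholders, not the flow-go API.
```go
package main

import (
	"errors"
	"fmt"
)

// errDataMismatch models the only expected failure of the store step below; in
// the caller's context it indicates corruption rather than a benign condition.
var errDataMismatch = errors.New("data mismatch for existing key")

// asException mimics the effect of elevating a benign sentinel into an
// irrecoverable exception: the message is preserved (%v, not %w), so callers
// can no longer match the original sentinel with errors.Is.
func asException(msg string, err error) error {
	return fmt.Errorf("%s: %v", msg, err)
}

// storeState stands in for the conditional "persist new protocol state" step:
// repeated identical writes succeed, conflicting writes for the same ID fail.
func storeState(db map[string]string, stateID, snapshot string) error {
	if prev, ok := db[stateID]; ok && prev != snapshot {
		return errDataMismatch
	}
	db[stateID] = snapshot
	return nil
}

func main() {
	db := map[string]string{"state-42": "snapshot-A"}

	// Same control flow as the deferred store operation: because the state ID
	// is a collision-resistant hash of the snapshot, a mismatch can only mean
	// a bug or corruption, so it is elevated instead of handled.
	if err := storeState(db, "state-42", "snapshot-B"); err != nil {
		fatal := asException("unexpected error while storing protocol state", err)
		fmt.Println(errors.Is(fatal, errDataMismatch)) // false: sentinel no longer matches
		fmt.Println(fatal)
	}
}
```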
- dbUpdates.AddIndexingOp(func(blockID flow.Identifier, tx *transaction.Tx) error { - return s.kvStoreSnapshots.IndexTx(blockID, resultingStateID)(tx) + deferredDBOps.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return s.kvStoreSnapshots.BatchIndex(lctx, rw, blockID, resultingStateID) }) + if parentStateID != resultingStateID { - // note that `SkipDuplicatesTx` is still required, because the result might equal to an earlier known state (we explicitly want to de-duplicate) - dbUpdates.AddDbOp(operation.SkipDuplicatesTx(s.kvStoreSnapshots.StoreTx(resultingStateID, evolvingState))) + deferredDBOps.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + err := s.kvStoreSnapshots.BatchStore(lctx, rw, resultingStateID, evolvingState) + if err == nil { + return nil + } + // The only error that `ProtocolKVStore.BatchStore` might return is `storage.ErrDataMismatch`. + // Repeated requests to store the same state for the same id should be no-ops. It should be noted + // that the `resultingStateID` is a collision-resistant hash of the encoded state (including the + // state's version). Hence, mismatching data for the same id indicates a security-critical bug + // or state corruption, making continuation impossible. + return irrecoverable.NewExceptionf("unexpected error while trying to store new protocol state: %w", err) + }) } - return resultingStateID, dbUpdates, nil + return resultingStateID, nil } diff --git a/state/protocol/util/testing.go b/state/protocol/util/testing.go index 5c8e4da5e5f..cea00866a97 100644 --- a/state/protocol/util/testing.go +++ b/state/protocol/util/testing.go @@ -21,6 +21,7 @@ import ( protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -67,11 +68,13 @@ func MockSealValidator(sealsDB storage.Seals) module.SealValidator { func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.State)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -91,6 +94,7 @@ func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func( func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.ParticipantState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -98,7 +102,8 @@ func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f fu all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -133,13 +138,15 @@ func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f fu func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, f func(*badger.DB, *pbadger.ParticipantState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() 
tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() all := bstorage.InitAll(mmetrics.NewNoopCollector(), db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -156,6 +163,7 @@ func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snap receiptValidator := MockReceiptValidator() sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() + fullState, err := pbadger.NewFullConsensusState( log, tracer, @@ -174,6 +182,7 @@ func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snap func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Snapshot, validator module.ReceiptValidator, f func(*badger.DB, *pbadger.ParticipantState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -181,7 +190,8 @@ func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Sn all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -215,6 +225,7 @@ func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Sn func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.FollowerState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -222,7 +233,8 @@ func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -253,13 +265,15 @@ func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, consumer protocol.Consumer, f func(*badger.DB, *pbadger.ParticipantState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -294,12 +308,14 @@ func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Sna func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, consumer protocol.Consumer, f func(*badger.DB, *pbadger.ParticipantState, protocol.MutableProtocolState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() tracer := trace.NewNoopTracer() log := zerolog.Nop() all := bstorage.InitAll(mmetrics.NewNoopCollector(), db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -344,6 +360,7 @@ func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot pr func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.FollowerState, storage.Headers, storage.Index)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) 
{ + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -351,7 +368,8 @@ func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol. all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -382,6 +400,7 @@ func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol. func RunWithFullProtocolStateAndMutator(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.ParticipantState, protocol.MutableProtocolState)) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() @@ -389,7 +408,8 @@ func RunWithFullProtocolStateAndMutator(t testing.TB, rootSnapshot protocol.Snap all := bstorage.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, - db, + badgerimpl.ToDB(db), + lockManager, all.Headers, all.Seals, all.Results, @@ -418,6 +438,7 @@ func RunWithFullProtocolStateAndMutator(t testing.TB, rootSnapshot protocol.Snap sealValidator, ) require.NoError(t, err) + mutableProtocolState := protocol_state.NewMutableProtocolState( log, all.EpochProtocolStateEntries, diff --git a/storage/approvals.go b/storage/approvals.go index 7b8d29f0664..304ad3f3b04 100644 --- a/storage/approvals.go +++ b/storage/approvals.go @@ -1,6 +1,8 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) @@ -15,11 +17,12 @@ import ( // a Verifier will always produce the same approval) type ResultApprovals interface { - // Store stores a ResultApproval by its ID. - // No errors are expected during normal operations. - Store(result *flow.ResultApproval) error - - // Index indexes a ResultApproval by result ID and chunk index. + // StoreMyApproval returns a functor, whose execution + // - will store the given ResultApproval + // - and index it by result ID and chunk index. + // - requires storage.LockIndexResultApproval lock to be held by the caller + // The functor's expected error returns during normal operation are: + // - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed // // CAUTION: the Flow protocol requires multiple approvals for the same chunk from different verification // nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index @@ -31,8 +34,9 @@ type ResultApprovals interface { // still the method succeeds on each call. However, when attempting to index *different* ResultApproval IDs // for the same key (resultID, chunkIndex) this method returns an exception, as this should never happen for // a correct Verification Node indexing its own approvals. - // No errors are expected during normal operations. - Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error + // It returns a functor so that some computation (such as computing approval ID) can be done + // before acquiring the lock. + StoreMyApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof) error // ByID retrieves a ResultApproval by its ID. // Returns [storage.ErrNotFound] if no Approval with the given ID has been stored. 
diff --git a/storage/badger/all.go b/storage/badger/all.go index 99b0c9c29f5..6bd5f77bf07 100644 --- a/storage/badger/all.go +++ b/storage/badger/all.go @@ -10,24 +10,25 @@ import ( ) func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All { - headers := NewHeaders(metrics, db) - guarantees := NewGuarantees(metrics, db, DefaultCacheSize) - seals := NewSeals(metrics, db) - index := NewIndex(metrics, db) - results := NewExecutionResults(metrics, db) - receipts := NewExecutionReceipts(metrics, db, results, DefaultCacheSize) - payloads := NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := NewBlocks(db, headers, payloads) - qcs := NewQuorumCertificates(metrics, db, DefaultCacheSize) - setups := NewEpochSetups(metrics, db) - epochCommits := NewEpochCommits(metrics, db) - epochProtocolStateEntries := NewEpochProtocolStateEntries(metrics, setups, epochCommits, db, - DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) - protocolKVStore := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - versionBeacons := store.NewVersionBeacons(badgerimpl.ToDB(db)) + sdb := badgerimpl.ToDB(db) + headers := store.NewHeaders(metrics, sdb) + guarantees := store.NewGuarantees(metrics, sdb, DefaultCacheSize) + seals := store.NewSeals(metrics, sdb) + index := store.NewIndex(metrics, sdb) + results := store.NewExecutionResults(metrics, sdb) + receipts := store.NewExecutionReceipts(metrics, sdb, results, DefaultCacheSize) + payloads := store.NewPayloads(sdb, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(sdb, headers, payloads) + qcs := store.NewQuorumCertificates(metrics, sdb, DefaultCacheSize) + setups := store.NewEpochSetups(metrics, sdb) + epochCommits := store.NewEpochCommits(metrics, sdb) + epochProtocolStateEntries := store.NewEpochProtocolStateEntries(metrics, setups, epochCommits, sdb, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStore := store.NewProtocolKVStore(metrics, sdb, store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := store.NewVersionBeacons(sdb) - transactions := NewTransactions(metrics, db) - collections := NewCollections(db, transactions) + transactions := store.NewTransactions(metrics, sdb) + collections := store.NewCollections(sdb, transactions) return &storage.All{ Headers: headers, diff --git a/storage/badger/approvals.go b/storage/badger/approvals.go deleted file mode 100644 index eb3cf4ae820..00000000000 --- a/storage/badger/approvals.go +++ /dev/null @@ -1,136 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ResultApprovals implements persistent storage for result approvals. 
-type ResultApprovals struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.ResultApproval] -} - -func NewResultApprovals(collector module.CacheMetrics, db *badger.DB) *ResultApprovals { - - store := func(key flow.Identifier, val *flow.ResultApproval) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertResultApproval(val))) - } - - retrieve := func(approvalID flow.Identifier) func(tx *badger.Txn) (*flow.ResultApproval, error) { - var approval flow.ResultApproval - return func(tx *badger.Txn) (*flow.ResultApproval, error) { - err := operation.RetrieveResultApproval(approvalID, &approval)(tx) - return &approval, err - } - } - - res := &ResultApprovals{ - db: db, - cache: newCache[flow.Identifier, *flow.ResultApproval](collector, metrics.ResourceResultApprovals, - withLimit[flow.Identifier, *flow.ResultApproval](flow.DefaultTransactionExpiry+100), - withStore[flow.Identifier, *flow.ResultApproval](store), - withRetrieve[flow.Identifier, *flow.ResultApproval](retrieve)), - } - - return res -} - -func (r *ResultApprovals) store(approval *flow.ResultApproval) func(*transaction.Tx) error { - return r.cache.PutTx(approval.ID(), approval) -} - -func (r *ResultApprovals) byID(approvalID flow.Identifier) func(*badger.Txn) (*flow.ResultApproval, error) { - return func(tx *badger.Txn) (*flow.ResultApproval, error) { - val, err := r.cache.Get(approvalID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -func (r *ResultApprovals) byChunk(resultID flow.Identifier, chunkIndex uint64) func(*badger.Txn) (*flow.ResultApproval, error) { - return func(tx *badger.Txn) (*flow.ResultApproval, error) { - var approvalID flow.Identifier - err := operation.LookupResultApproval(resultID, chunkIndex, &approvalID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup result approval ID: %w", err) - } - return r.byID(approvalID)(tx) - } -} - -func (r *ResultApprovals) index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - err := operation.IndexResultApproval(resultID, chunkIndex, approvalID)(tx) - if err == nil { - return nil - } - - if !errors.Is(err, storage.ErrAlreadyExists) { - return err - } - - // When trying to index an approval for a result, and there is already - // an approval for the result, double check if the indexed approval is - // the same. - // We don't allow indexing multiple approvals per chunk because the - // store is only used within Verification nodes, and it is impossible - // for a Verification node to compute different approvals for the same - // chunk. - var storedApprovalID flow.Identifier - err = operation.LookupResultApproval(resultID, chunkIndex, &storedApprovalID)(tx) - if err != nil { - return fmt.Errorf("there is an approval stored already, but cannot retrieve it: %w", err) - } - - if storedApprovalID != approvalID { - return fmt.Errorf("attempting to store conflicting approval (result: %v, chunk index: %d): storing: %v, stored: %v. %w", - resultID, chunkIndex, approvalID, storedApprovalID, storage.ErrDataMismatch) - } - - return nil - } -} - -// Store stores a ResultApproval -func (r *ResultApprovals) Store(approval *flow.ResultApproval) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.store(approval)) -} - -// Index indexes a ResultApproval by chunk (ResultID + chunk index). 
-// operation is idempotent (repeated calls with the same value are equivalent to -// just calling the method once; still the method succeeds on each call). -func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - err := operation.RetryOnConflict(r.db.Update, r.index(resultID, chunkIndex, approvalID)) - if err != nil { - return fmt.Errorf("could not index result approval: %w", err) - } - return nil -} - -// ByID retrieves a ResultApproval by its ID -func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(approvalID)(tx) -} - -// ByChunk retrieves a ResultApproval by result ID and chunk index. The -// ResultApprovals store is only used within a verification node, where it is -// assumed that there is never more than one approval per chunk. -func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byChunk(resultID, chunkIndex)(tx) -} diff --git a/storage/badger/approvals_test.go b/storage/badger/approvals_test.go deleted file mode 100644 index fad8abbbf46..00000000000 --- a/storage/badger/approvals_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestApprovalStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - - byID, err := store.ByID(approval.ID()) - require.NoError(t, err) - require.Equal(t, approval, byID) - - byChunk, err := store.ByChunk(approval.Body.ExecutionResultID, approval.Body.ChunkIndex) - require.NoError(t, err) - require.Equal(t, approval, byChunk) - }) -} - -func TestApprovalStoreTwice(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - - err = store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - }) -} - -func TestApprovalStoreTwoDifferentApprovalsShouldFail(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - approval1 := unittest.ResultApprovalFixture() - approval2 := unittest.ResultApprovalFixture() - - err := store.Store(approval1) - require.NoError(t, err) - - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval1.ID()) - require.NoError(t, err) - - // we can store a different approval, but we can't index a different - // approval for the same chunk. 
- err = store.Store(approval2) - require.NoError(t, err) - - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval2.ID()) - require.ErrorIs(t, err, storage.ErrDataMismatch) - }) -} diff --git a/storage/badger/blocks.go b/storage/badger/blocks.go deleted file mode 100644 index 03ed95fc2fa..00000000000 --- a/storage/badger/blocks.go +++ /dev/null @@ -1,105 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Blocks implements a simple block storage around a badger DB. -type Blocks struct { - db *badger.DB - headers *Headers - payloads *Payloads -} - -// NewBlocks ... -func NewBlocks(db *badger.DB, headers *Headers, payloads *Payloads) *Blocks { - b := &Blocks{ - db: db, - headers: headers, - payloads: payloads, - } - return b -} - -func (b *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := b.headers.storeTx(block.Header)(tx) - if err != nil { - return fmt.Errorf("could not store header %v: %w", block.Header.ID(), err) - } - err = b.payloads.storeTx(block.ID(), block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not store payload: %w", err) - } - return nil - } -} - -func (b *Blocks) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Block, error) { - return func(tx *badger.Txn) (*flow.Block, error) { - header, err := b.headers.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve header: %w", err) - } - payload, err := b.payloads.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve payload: %w", err) - } - block := &flow.Block{ - Header: header, - Payload: payload, - } - return block, nil - } -} - -// Store ... -func (b *Blocks) Store(block *flow.Block) error { - return operation.RetryOnConflictTx(b.db, transaction.Update, b.StoreTx(block)) -} - -// ByID ... -func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { - tx := b.db.NewTransaction(false) - defer tx.Discard() - return b.retrieveTx(blockID)(tx) -} - -// ByHeight ... -func (b *Blocks) ByHeight(height uint64) (*flow.Block, error) { - tx := b.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := b.headers.retrieveIdByHeightTx(height)(tx) - if err != nil { - return nil, err - } - return b.retrieveTx(blockID)(tx) -} - -// ByCollectionID ... -func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { - var blockID flow.Identifier - err := b.db.View(operation.LookupCollectionBlock(collID, &blockID)) - if err != nil { - return nil, fmt.Errorf("could not look up block: %w", err) - } - return b.ByID(blockID) -} - -// IndexBlockForCollections ... 
-func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error { - for _, collID := range collIDs { - err := operation.RetryOnConflict(b.db.Update, operation.SkipDuplicates(operation.IndexCollectionBlock(collID, blockID))) - if err != nil { - return fmt.Errorf("could not index collection block (%x): %w", collID, err) - } - } - return nil -} diff --git a/storage/badger/blocks_test.go b/storage/badger/blocks_test.go deleted file mode 100644 index 189c2aa4076..00000000000 --- a/storage/badger/blocks_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestBlockStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - cacheMetrics := &metrics.NoopCollector{} - // verify after storing a block should be able to retrieve it back - blocks := badgerstorage.InitAll(cacheMetrics, db).Blocks - block := unittest.FullBlockFixture() - block.SetPayload(unittest.PayloadFixture(unittest.WithAllTheFixins)) - - err := blocks.Store(&block) - require.NoError(t, err) - - retrieved, err := blocks.ByID(block.ID()) - require.NoError(t, err) - - require.Equal(t, &block, retrieved) - - // verify after a restart, the block stored in the database is the same - // as the original - blocksAfterRestart := badgerstorage.InitAll(cacheMetrics, db).Blocks - receivedAfterRestart, err := blocksAfterRestart.ByID(block.ID()) - require.NoError(t, err) - - require.Equal(t, &block, receivedAfterRestart) - }) -} diff --git a/storage/badger/cache.go b/storage/badger/cache.go index 67bc0b7f055..8032722b484 100644 --- a/storage/badger/cache.go +++ b/storage/badger/cache.go @@ -34,12 +34,6 @@ func noStore[K comparable, V any](_ K, _ V) func(*transaction.Tx) error { } } -func noopStore[K comparable, V any](_ K, _ V) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - return nil - } -} - type retrieveFunc[K comparable, V any] func(key K) func(*badger.Txn) (V, error) func withRetrieve[K comparable, V any](retrieve retrieveFunc[K, V]) func(*Cache[K, V]) { diff --git a/storage/badger/chunk_consumer_test.go b/storage/badger/chunk_consumer_test.go deleted file mode 100644 index c33fabc06b7..00000000000 --- a/storage/badger/chunk_consumer_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package badger - -import "testing" - -// 1. can init -// 2. can't set a process if never inited -// 3. can set after init -// 4. can read after init -// 5. can read after set -func TestChunkConsumer(t *testing.T) { - // TODO -} diff --git a/storage/badger/chunks_queue.go b/storage/badger/chunks_queue.go deleted file mode 100644 index 430abe0241b..00000000000 --- a/storage/badger/chunks_queue.go +++ /dev/null @@ -1,117 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/chunks" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// ChunksQueue stores a queue of chunk locators that assigned to me to verify. -// Job consumers can read the locators as job from the queue by index. -// Chunk locators stored in this queue are unique. 
-type ChunksQueue struct { - db *badger.DB -} - -const JobQueueChunksQueue = "JobQueueChunksQueue" - -// NewChunkQueue will initialize the underlying badger database of chunk locator queue. -func NewChunkQueue(db *badger.DB) *ChunksQueue { - return &ChunksQueue{ - db: db, - } -} - -// Init initializes chunk queue's latest index with the given default index. -func (q *ChunksQueue) Init(defaultIndex uint64) (bool, error) { - _, err := q.LatestIndex() - if errors.Is(err, storage.ErrNotFound) { - err = q.db.Update(operation.InitJobLatestIndex(JobQueueChunksQueue, defaultIndex)) - if err != nil { - return false, fmt.Errorf("could not init chunk locator queue with default index %v: %w", defaultIndex, err) - } - return true, nil - } - if err != nil { - return false, fmt.Errorf("could not get latest index: %w", err) - } - - return false, nil -} - -// StoreChunkLocator stores a new chunk locator that assigned to me to the job queue. -// A true will be returned, if the locator was new. -// A false will be returned, if the locator was duplicate. -func (q *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error) { - err := operation.RetryOnConflict(q.db.Update, func(tx *badger.Txn) error { - // make sure the chunk locator is unique - err := operation.InsertChunkLocator(locator)(tx) - if err != nil { - return fmt.Errorf("failed to insert chunk locator: %w", err) - } - - // read the latest index - var latest uint64 - err = operation.RetrieveJobLatestIndex(JobQueueChunksQueue, &latest)(tx) - if err != nil { - return fmt.Errorf("failed to retrieve job index for chunk locator queue: %w", err) - } - - // insert to the next index - next := latest + 1 - err = operation.InsertJobAtIndex(JobQueueChunksQueue, next, locator.ID())(tx) - if err != nil { - return fmt.Errorf("failed to set job index for chunk locator queue at index %v: %w", next, err) - } - - // update the next index as the latest index - err = operation.SetJobLatestIndex(JobQueueChunksQueue, next)(tx) - if err != nil { - return fmt.Errorf("failed to update latest index %v: %w", next, err) - } - - return nil - }) - - // was trying to store a duplicate locator - if errors.Is(err, storage.ErrAlreadyExists) { - return false, nil - } - if err != nil { - return false, fmt.Errorf("failed to store chunk locator: %w", err) - } - return true, nil -} - -// LatestIndex returns the index of the latest chunk locator stored in the queue. -func (q *ChunksQueue) LatestIndex() (uint64, error) { - var latest uint64 - err := q.db.View(operation.RetrieveJobLatestIndex(JobQueueChunksQueue, &latest)) - if err != nil { - return 0, fmt.Errorf("could not retrieve latest index for chunks queue: %w", err) - } - return latest, nil -} - -// AtIndex returns the chunk locator stored at the given index in the queue. 
-func (q *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error) { - var locatorID flow.Identifier - err := q.db.View(operation.RetrieveJobAtIndex(JobQueueChunksQueue, index, &locatorID)) - if err != nil { - return nil, fmt.Errorf("could not retrieve chunk locator in queue: %w", err) - } - - var locator chunks.Locator - err = q.db.View(operation.RetrieveChunkLocator(locatorID, &locator)) - if err != nil { - return nil, fmt.Errorf("could not retrieve locator for chunk id %v: %w", locatorID, err) - } - - return &locator, nil -} diff --git a/storage/badger/chunks_queue_test.go b/storage/badger/chunks_queue_test.go deleted file mode 100644 index e1e9350afe8..00000000000 --- a/storage/badger/chunks_queue_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package badger - -import "testing" - -// 1. should be able to read after store -// 2. should be able to read the latest index after store -// 3. should return false if a duplicate chunk is stored -// 4. should return true if a new chunk is stored -// 5. should return an increased index when a chunk is stored -// 6. storing 100 chunks concurrent should return last index as 100 -// 7. should not be able to read with wrong index -// 8. should return init index after init -// 9. storing chunk and updating the latest index should be atomic -func TestStoreAndRead(t *testing.T) { - // TODO -} diff --git a/storage/badger/cluster_blocks_test.go b/storage/badger/cluster_blocks_test.go deleted file mode 100644 index 64def9fec6b..00000000000 --- a/storage/badger/cluster_blocks_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package badger - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestClusterBlocksByHeight(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - chain := unittest.ClusterBlockChainFixture(5) - parent, blocks := chain[0], chain[1:] - - // add parent as boundary - err := db.Update(operation.IndexClusterBlockHeight(parent.Header.ChainID, parent.Header.Height, parent.ID())) - require.NoError(t, err) - - err = db.Update(operation.InsertClusterFinalizedHeight(parent.Header.ChainID, parent.Header.Height)) - require.NoError(t, err) - - // store a chain of blocks - for _, block := range blocks { - err := db.Update(procedure.InsertClusterBlock(&block)) - require.NoError(t, err) - - err = db.Update(procedure.FinalizeClusterBlock(block.Header.ID())) - require.NoError(t, err) - } - - clusterBlocks := NewClusterBlocks( - db, - blocks[0].Header.ChainID, - NewHeaders(metrics.NewNoopCollector(), db), - NewClusterPayloads(metrics.NewNoopCollector(), db), - ) - - // check if the block can be retrieved by height - for _, block := range blocks { - retrievedBlock, err := clusterBlocks.ByHeight(block.Header.Height) - require.NoError(t, err) - require.Equal(t, block.ID(), retrievedBlock.ID()) - } - }) -} diff --git a/storage/badger/cluster_payloads.go b/storage/badger/cluster_payloads.go deleted file mode 100644 index 6a7efae75b1..00000000000 --- a/storage/badger/cluster_payloads.go +++ /dev/null @@ -1,69 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - 
"github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ClusterPayloads implements storage of block payloads for collection node -// cluster consensus. -type ClusterPayloads struct { - db *badger.DB - cache *Cache[flow.Identifier, *cluster.Payload] -} - -func NewClusterPayloads(cacheMetrics module.CacheMetrics, db *badger.DB) *ClusterPayloads { - - store := func(blockID flow.Identifier, payload *cluster.Payload) func(*transaction.Tx) error { - return transaction.WithTx(procedure.InsertClusterPayload(blockID, payload)) - } - - retrieve := func(blockID flow.Identifier) func(tx *badger.Txn) (*cluster.Payload, error) { - var payload cluster.Payload - return func(tx *badger.Txn) (*cluster.Payload, error) { - err := procedure.RetrieveClusterPayload(blockID, &payload)(tx) - return &payload, err - } - } - - cp := &ClusterPayloads{ - db: db, - cache: newCache[flow.Identifier, *cluster.Payload](cacheMetrics, metrics.ResourceClusterPayload, - withLimit[flow.Identifier, *cluster.Payload](flow.DefaultTransactionExpiry*4), - withStore(store), - withRetrieve(retrieve)), - } - - return cp -} - -func (cp *ClusterPayloads) storeTx(blockID flow.Identifier, payload *cluster.Payload) func(*transaction.Tx) error { - return cp.cache.PutTx(blockID, payload) -} - -func (cp *ClusterPayloads) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*cluster.Payload, error) { - return func(tx *badger.Txn) (*cluster.Payload, error) { - val, err := cp.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -func (cp *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error { - return operation.RetryOnConflictTx(cp.db, transaction.Update, cp.storeTx(blockID, payload)) -} - -func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) { - tx := cp.db.NewTransaction(false) - defer tx.Discard() - return cp.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/cluster_payloads_test.go b/storage/badger/cluster_payloads_test.go deleted file mode 100644 index c1b78361969..00000000000 --- a/storage/badger/cluster_payloads_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestStoreRetrieveClusterPayload(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewClusterPayloads(metrics, db) - - blockID := unittest.IdentifierFixture() - expected := unittest.ClusterPayloadFixture(5) - - // store payload - err := store.Store(blockID, expected) - require.NoError(t, err) - - // fetch payload - payload, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, expected, payload) - - // storing again should error with key already exists - err = store.Store(blockID, expected) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - }) -} - -func TestClusterPayloadRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewClusterPayloads(metrics, db) - - blockID := unittest.IdentifierFixture() - - _, err := store.ByBlockID(blockID) - assert.ErrorIs(t, err, 
storage.ErrNotFound) - }) -} diff --git a/storage/badger/collections.go b/storage/badger/collections.go deleted file mode 100644 index 62ce52d9b8e..00000000000 --- a/storage/badger/collections.go +++ /dev/null @@ -1,147 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Collections struct { - db *badger.DB - transactions *Transactions -} - -func NewCollections(db *badger.DB, transactions *Transactions) *Collections { - c := &Collections{ - db: db, - transactions: transactions, - } - return c -} - -func (c *Collections) Store(collection *flow.Collection) error { - return operation.RetryOnConflictTx(c.db, transaction.Update, func(ttx *transaction.Tx) error { - light := collection.Light() - err := transaction.WithTx(operation.SkipDuplicates(operation.InsertCollection(&light)))(ttx) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - - for _, tx := range collection.Transactions { - err = c.transactions.storeTx(tx)(ttx) - if err != nil { - return fmt.Errorf("could not insert transaction: %w", err) - } - } - - return nil - }) -} - -func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error) { - var ( - light flow.LightCollection - collection flow.Collection - ) - - err := c.db.View(func(btx *badger.Txn) error { - err := operation.RetrieveCollection(colID, &light)(btx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - for _, txID := range light.Transactions { - tx, err := c.transactions.ByID(txID) - if err != nil { - return fmt.Errorf("could not retrieve transaction: %w", err) - } - - collection.Transactions = append(collection.Transactions, tx) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} - -func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error) { - var collection flow.LightCollection - - err := c.db.View(func(tx *badger.Txn) error { - err := operation.RetrieveCollection(colID, &collection)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} - -func (c *Collections) Remove(colID flow.Identifier) error { - return operation.RetryOnConflict(c.db.Update, func(btx *badger.Txn) error { - err := operation.RemoveCollection(colID)(btx) - if err != nil { - return fmt.Errorf("could not remove collection: %w", err) - } - return nil - }) -} - -func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { - return operation.RetryOnConflict(c.db.Update, func(tx *badger.Txn) error { - err := operation.InsertCollection(collection)(tx) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - - for _, txID := range collection.Transactions { - err = operation.IndexCollectionByTransaction(txID, collection.ID())(tx) - if errors.Is(err, storage.ErrAlreadyExists) { - continue - } - if err != nil { - return fmt.Errorf("could not insert transaction ID: %w", err) - } - } - - return nil - }) -} - -func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { - var collection flow.LightCollection - err := c.db.View(func(tx *badger.Txn) error { - collID := &flow.Identifier{} - err := 
operation.RetrieveCollectionID(txID, collID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection id: %w", err) - } - - err = operation.RetrieveCollection(*collID, &collection)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} diff --git a/storage/badger/collections_test.go b/storage/badger/collections_test.go deleted file mode 100644 index 72ea666b3d7..00000000000 --- a/storage/badger/collections_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestCollections(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - // create a light collection with three transactions - expected := unittest.CollectionFixture(3).Light() - - // store the light collection and the transaction index - err := collections.StoreLightAndIndexByTransaction(&expected) - require.NoError(t, err) - - // retrieve the light collection by collection id - actual, err := collections.LightByID(expected.ID()) - require.NoError(t, err) - - // check if the light collection was indeed persisted - assert.Equal(t, &expected, actual) - - expectedID := expected.ID() - - // retrieve the collection light id by each of its transaction id - for _, txID := range expected.Transactions { - collLight, err := collections.LightByTransactionID(txID) - actualID := collLight.ID() - // check that the collection id can indeed be retrieved by transaction id - require.NoError(t, err) - assert.Equal(t, expectedID, actualID) - } - - }) -} - -func TestCollections_IndexDuplicateTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - // create two collections which share 1 transaction - col1 := unittest.CollectionFixture(2) - col2 := unittest.CollectionFixture(1) - dupTx := col1.Transactions[0] // the duplicated transaction - col2Tx := col2.Transactions[0] // transaction that's only in col2 - col2.Transactions = append(col2.Transactions, dupTx) - - // insert col1 - col1Light := col1.Light() - err := collections.StoreLightAndIndexByTransaction(&col1Light) - require.NoError(t, err) - - // insert col2 - col2Light := col2.Light() - err = collections.StoreLightAndIndexByTransaction(&col2Light) - require.NoError(t, err) - - // should be able to retrieve col2 by ID - gotLightByCol2ID, err := collections.LightByID(col2.ID()) - require.NoError(t, err) - assert.Equal(t, &col2Light, gotLightByCol2ID) - - // should be able to retrieve col2 by the transaction which only appears in col2 - _, err = collections.LightByTransactionID(col2Tx.ID()) - require.NoError(t, err) - - // col1 (not col2) should be indexed by the shared transaction (since col1 was inserted first) - gotLightByDupTxID, err := collections.LightByTransactionID(dupTx.ID()) - require.NoError(t, err) - assert.Equal(t, &col1Light, gotLightByDupTxID) - }) -} diff --git a/storage/badger/epoch_commits.go 
b/storage/badger/epoch_commits.go deleted file mode 100644 index 5006781b1ad..00000000000 --- a/storage/badger/epoch_commits.go +++ /dev/null @@ -1,66 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochCommits struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.EpochCommit] -} - -func NewEpochCommits(collector module.CacheMetrics, db *badger.DB) *EpochCommits { - - store := func(id flow.Identifier, commit *flow.EpochCommit) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertEpochCommit(id, commit))) - } - - retrieve := func(id flow.Identifier) func(*badger.Txn) (*flow.EpochCommit, error) { - return func(tx *badger.Txn) (*flow.EpochCommit, error) { - var commit flow.EpochCommit - err := operation.RetrieveEpochCommit(id, &commit)(tx) - return &commit, err - } - } - - ec := &EpochCommits{ - db: db, - cache: newCache[flow.Identifier, *flow.EpochCommit](collector, metrics.ResourceEpochCommit, - withLimit[flow.Identifier, *flow.EpochCommit](4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - } - - return ec -} - -func (ec *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error { - return ec.cache.PutTx(commit.ID(), commit) -} - -func (ec *EpochCommits) retrieveTx(commitID flow.Identifier) func(tx *badger.Txn) (*flow.EpochCommit, error) { - return func(tx *badger.Txn) (*flow.EpochCommit, error) { - val, err := ec.cache.Get(commitID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve EpochCommit event with id %x: %w", commitID, err) - } - return val, nil - } -} - -// ByID will return the EpochCommit event by its ID. 
-// Error returns: -// * storage.ErrNotFound if no EpochCommit with the ID exists -func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error) { - tx := ec.db.NewTransaction(false) - defer tx.Discard() - return ec.retrieveTx(commitID)(tx) -} diff --git a/storage/badger/epoch_commits_test.go b/storage/badger/epoch_commits_test.go deleted file mode 100644 index ee2819c87a4..00000000000 --- a/storage/badger/epoch_commits_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package badger_test - -import ( - "io" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/onflow/crypto" - "github.com/onflow/go-ethereum/rlp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/vmihailenco/msgpack/v4" - - "github.com/onflow/flow-go/model/encodable" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestEpochCommitStoreAndRetrieve tests that a commit can be stored, retrieved and attempted to be stored again without an error -func TestEpochCommitStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEpochCommits(metrics, db) - - // attempt to get a invalid commit - _, err := store.ByID(unittest.IdentifierFixture()) - assert.ErrorIs(t, err, storage.ErrNotFound) - - // store a commit in db - expected := unittest.EpochCommitFixture() - err = transaction.Update(db, func(tx *transaction.Tx) error { - return store.StoreTx(expected)(tx) - }) - require.NoError(t, err) - - // retrieve the commit by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // test storing same epoch commit - err = transaction.Update(db, func(tx *transaction.Tx) error { - return store.StoreTx(expected)(tx) - }) - require.NoError(t, err) - }) -} - -// epochCommitV0 is a version of [flow.EpochCommit] without the [flow.DKGIndexMap] field. -// This exact structure was used prior to Protocol State Version 2, and we would like to ensure that new version of [flow.EpochCommit] -// is backward compatible with this structure. -// It is used only in tests. -// TODO(EFM, #6794): Remove this once we complete the network upgrade -type epochCommitV0 struct { - // Counter is the epoch counter of the epoch being committed - Counter uint64 - // ClusterQCs is an ordered list of root quorum certificates, one per cluster. - // EpochCommit.ClustersQCs[i] is the QC for EpochSetup.Assignments[i] - ClusterQCs []flow.ClusterQCVoteData - // DKGGroupKey is the group public key produced by the DKG associated with this epoch. - // It is used to verify Random Beacon signatures for the epoch with counter, Counter. - DKGGroupKey crypto.PublicKey - // DKGParticipantKeys is a list of public keys, one per DKG participant, ordered by Random Beacon index. - // This list is the output of the DKG associated with this epoch. - // It is used to verify Random Beacon signatures for the epoch with counter, Counter. - // CAUTION: This list may include keys for nodes which do not exist in the consensus committee - // and may NOT include keys for all nodes in the consensus committee. 
- DKGParticipantKeys []crypto.PublicKey -} - -func (commit *epochCommitV0) ID() flow.Identifier { - return flow.MakeID(commit) -} - -func (commit *epochCommitV0) EncodeRLP(w io.Writer) error { - rlpEncodable := struct { - Counter uint64 - ClusterQCs []flow.ClusterQCVoteData - DKGGroupKey []byte - DKGParticipantKeys [][]byte - }{ - Counter: commit.Counter, - ClusterQCs: commit.ClusterQCs, - DKGGroupKey: commit.DKGGroupKey.Encode(), - DKGParticipantKeys: make([][]byte, 0, len(commit.DKGParticipantKeys)), - } - for _, key := range commit.DKGParticipantKeys { - rlpEncodable.DKGParticipantKeys = append(rlpEncodable.DKGParticipantKeys, key.Encode()) - } - - return rlp.Encode(w, rlpEncodable) -} - -// encodableCommit represents encoding of epochCommitV0, it is used for serialization purposes and is used only in tests. -// TODO(EFM, #6794): Remove this once we complete the network upgrade -type encodableCommit struct { - Counter uint64 - ClusterQCs []flow.ClusterQCVoteData - DKGGroupKey encodable.RandomBeaconPubKey - DKGParticipantKeys []encodable.RandomBeaconPubKey -} - -func encodableFromCommit(commit *epochCommitV0) encodableCommit { - encKeys := make([]encodable.RandomBeaconPubKey, 0, len(commit.DKGParticipantKeys)) - for _, key := range commit.DKGParticipantKeys { - encKeys = append(encKeys, encodable.RandomBeaconPubKey{PublicKey: key}) - } - return encodableCommit{ - Counter: commit.Counter, - ClusterQCs: commit.ClusterQCs, - DKGGroupKey: encodable.RandomBeaconPubKey{PublicKey: commit.DKGGroupKey}, - DKGParticipantKeys: encKeys, - } -} - -func commitFromEncodable(enc encodableCommit) epochCommitV0 { - dkgKeys := make([]crypto.PublicKey, 0, len(enc.DKGParticipantKeys)) - for _, key := range enc.DKGParticipantKeys { - dkgKeys = append(dkgKeys, key.PublicKey) - } - return epochCommitV0{ - Counter: enc.Counter, - ClusterQCs: enc.ClusterQCs, - DKGGroupKey: enc.DKGGroupKey.PublicKey, - DKGParticipantKeys: dkgKeys, - } -} - -func (commit *epochCommitV0) MarshalMsgpack() ([]byte, error) { - return msgpack.Marshal(encodableFromCommit(commit)) -} - -func (commit *epochCommitV0) UnmarshalMsgpack(b []byte) error { - var enc encodableCommit - err := msgpack.Unmarshal(b, &enc) - if err != nil { - return err - } - *commit = commitFromEncodable(enc) - return nil -} - -// TestStoreV0AndDecodeV1 tests that an [flow.EpochCommit] without [flow.DKGIndexMap](v0) field can be stored and -// later retrieved as a [flow.EpochCommit](v1) without any errors or data loss. -// This test verifies that the [flow.EpochCommit] is backward compatible with respect to the [flow.DKGIndexMap] field. 
-// TODO(EFM, #6794): Remove this once we complete the network upgrade -func TestStoreV0AndDecodeV1(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - v1 := unittest.EpochCommitFixture() - v0 := &epochCommitV0{ - Counter: v1.Counter, - ClusterQCs: v1.ClusterQCs, - DKGGroupKey: v1.DKGGroupKey, - DKGParticipantKeys: v1.DKGParticipantKeys, - } - require.Equal(t, v0.ID(), v1.ID()) - - err := transaction.Update(db, func(tx *transaction.Tx) error { - return operation.InsertEpochCommitV0(v0.ID(), v0)(tx.DBTxn) - }) - require.NoError(t, err) - - var actual flow.EpochCommit - err = transaction.View(db, func(tx *transaction.Tx) error { - return operation.RetrieveEpochCommit(v0.ID(), &actual)(tx.DBTxn) - }) - require.NoError(t, err) - require.Equal(t, v1, &actual) - require.Equal(t, v0.ID(), actual.ID()) - require.Equal(t, v1, &actual) - }) - -} diff --git a/storage/badger/epoch_setups.go b/storage/badger/epoch_setups.go deleted file mode 100644 index 9f0c0d1e7ca..00000000000 --- a/storage/badger/epoch_setups.go +++ /dev/null @@ -1,67 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochSetups struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.EpochSetup] -} - -// NewEpochSetups instantiates a new EpochSetups storage. -func NewEpochSetups(collector module.CacheMetrics, db *badger.DB) *EpochSetups { - - store := func(id flow.Identifier, setup *flow.EpochSetup) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertEpochSetup(id, setup))) - } - - retrieve := func(id flow.Identifier) func(*badger.Txn) (*flow.EpochSetup, error) { - return func(tx *badger.Txn) (*flow.EpochSetup, error) { - var setup flow.EpochSetup - err := operation.RetrieveEpochSetup(id, &setup)(tx) - return &setup, err - } - } - - es := &EpochSetups{ - db: db, - cache: newCache[flow.Identifier, *flow.EpochSetup](collector, metrics.ResourceEpochSetup, - withLimit[flow.Identifier, *flow.EpochSetup](4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - } - - return es -} - -func (es *EpochSetups) StoreTx(setup *flow.EpochSetup) func(tx *transaction.Tx) error { - return es.cache.PutTx(setup.ID(), setup) -} - -func (es *EpochSetups) retrieveTx(setupID flow.Identifier) func(tx *badger.Txn) (*flow.EpochSetup, error) { - return func(tx *badger.Txn) (*flow.EpochSetup, error) { - val, err := es.cache.Get(setupID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve EpochSetup event with id %x: %w", setupID, err) - } - return val, nil - } -} - -// ByID will return the EpochSetup event by its ID. 
-// Error returns: -// * storage.ErrNotFound if no EpochSetup with the ID exists -func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error) { - tx := es.db.NewTransaction(false) - defer tx.Discard() - return es.retrieveTx(setupID)(tx) -} diff --git a/storage/badger/epoch_setups_test.go b/storage/badger/epoch_setups_test.go deleted file mode 100644 index d51292cc21e..00000000000 --- a/storage/badger/epoch_setups_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// TestEpochSetupStoreAndRetrieve tests that a setup can be stored, retrieved and attempted to be stored again without an error -func TestEpochSetupStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEpochSetups(metrics, db) - - // attempt to get a setup that doesn't exist - _, err := store.ByID(unittest.IdentifierFixture()) - assert.ErrorIs(t, err, storage.ErrNotFound) - - // store a setup in db - expected := unittest.EpochSetupFixture() - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(expected)) - require.NoError(t, err) - - // retrieve the setup by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // test storing same epoch setup - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(expected)) - require.NoError(t, err) - }) -} diff --git a/storage/badger/guarantees.go b/storage/badger/guarantees.go deleted file mode 100644 index b7befd342b6..00000000000 --- a/storage/badger/guarantees.go +++ /dev/null @@ -1,66 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Guarantees implements persistent storage for collection guarantees. 
-type Guarantees struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.CollectionGuarantee] -} - -func NewGuarantees(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *Guarantees { - - store := func(collID flow.Identifier, guarantee *flow.CollectionGuarantee) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertGuarantee(collID, guarantee))) - } - - retrieve := func(collID flow.Identifier) func(*badger.Txn) (*flow.CollectionGuarantee, error) { - var guarantee flow.CollectionGuarantee - return func(tx *badger.Txn) (*flow.CollectionGuarantee, error) { - err := operation.RetrieveGuarantee(collID, &guarantee)(tx) - return &guarantee, err - } - } - - g := &Guarantees{ - db: db, - cache: newCache[flow.Identifier, *flow.CollectionGuarantee](collector, metrics.ResourceGuarantee, - withLimit[flow.Identifier, *flow.CollectionGuarantee](cacheSize), - withStore(store), - withRetrieve(retrieve)), - } - - return g -} - -func (g *Guarantees) storeTx(guarantee *flow.CollectionGuarantee) func(*transaction.Tx) error { - return g.cache.PutTx(guarantee.ID(), guarantee) -} - -func (g *Guarantees) retrieveTx(collID flow.Identifier) func(*badger.Txn) (*flow.CollectionGuarantee, error) { - return func(tx *badger.Txn) (*flow.CollectionGuarantee, error) { - val, err := g.cache.Get(collID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -func (g *Guarantees) Store(guarantee *flow.CollectionGuarantee) error { - return operation.RetryOnConflictTx(g.db, transaction.Update, g.storeTx(guarantee)) -} - -func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) { - tx := g.db.NewTransaction(false) - defer tx.Discard() - return g.retrieveTx(collID)(tx) -} diff --git a/storage/badger/guarantees_test.go b/storage/badger/guarantees_test.go deleted file mode 100644 index a62a157354f..00000000000 --- a/storage/badger/guarantees_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestGuaranteeStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewGuarantees(metrics, db, 1000) - - // abiturary guarantees - expected := unittest.CollectionGuaranteeFixture() - - // retrieve guarantee without stored - _, err := store.ByCollectionID(expected.ID()) - require.ErrorIs(t, err, storage.ErrNotFound) - - // store guarantee - err = store.Store(expected) - require.NoError(t, err) - - // retreive by coll idx - actual, err := store.ByCollectionID(expected.ID()) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/headers.go b/storage/badger/headers.go deleted file mode 100644 index cea044f445b..00000000000 --- a/storage/badger/headers.go +++ /dev/null @@ -1,196 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Headers implements a simple read-only 
header storage around a badger DB. -type Headers struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.Header] - heightCache *Cache[uint64, flow.Identifier] -} - -func NewHeaders(collector module.CacheMetrics, db *badger.DB) *Headers { - - store := func(blockID flow.Identifier, header *flow.Header) func(*transaction.Tx) error { - return transaction.WithTx(operation.InsertHeader(blockID, header)) - } - - // CAUTION: should only be used to index FINALIZED blocks by their - // respective height - storeHeight := func(height uint64, id flow.Identifier) func(*transaction.Tx) error { - return transaction.WithTx(operation.IndexBlockHeight(height, id)) - } - - retrieve := func(blockID flow.Identifier) func(tx *badger.Txn) (*flow.Header, error) { - var header flow.Header - return func(tx *badger.Txn) (*flow.Header, error) { - err := operation.RetrieveHeader(blockID, &header)(tx) - return &header, err - } - } - - retrieveHeight := func(height uint64) func(tx *badger.Txn) (flow.Identifier, error) { - return func(tx *badger.Txn) (flow.Identifier, error) { - var id flow.Identifier - err := operation.LookupBlockHeight(height, &id)(tx) - return id, err - } - } - - h := &Headers{ - db: db, - cache: newCache(collector, metrics.ResourceHeader, - withLimit[flow.Identifier, *flow.Header](4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - - heightCache: newCache(collector, metrics.ResourceFinalizedHeight, - withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry), - withStore(storeHeight), - withRetrieve(retrieveHeight)), - } - - return h -} - -func (h *Headers) storeTx(header *flow.Header) func(*transaction.Tx) error { - return h.cache.PutTx(header.ID(), header) -} - -func (h *Headers) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Header, error) { - return func(tx *badger.Txn) (*flow.Header, error) { - val, err := h.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -// results in `storage.ErrNotFound` for unknown height -func (h *Headers) retrieveIdByHeightTx(height uint64) func(*badger.Txn) (flow.Identifier, error) { - return func(tx *badger.Txn) (flow.Identifier, error) { - blockID, err := h.heightCache.Get(height)(tx) - if err != nil { - return flow.ZeroID, fmt.Errorf("failed to retrieve block ID for height %d: %w", height, err) - } - return blockID, nil - } -} - -func (h *Headers) Store(header *flow.Header) error { - return operation.RetryOnConflictTx(h.db, transaction.Update, h.storeTx(header)) -} - -func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - return h.retrieveTx(blockID)(tx) -} - -func (h *Headers) ByHeight(height uint64) (*flow.Header, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := h.retrieveIdByHeightTx(height)(tx) - if err != nil { - return nil, err - } - return h.retrieveTx(blockID)(tx) -} - -// Exists returns true if a header with the given ID has been stored. -// No errors are expected during normal operation. 
-func (h *Headers) Exists(blockID flow.Identifier) (bool, error) { - // if the block is in the cache, return true - if ok := h.cache.IsCached(blockID); ok { - return ok, nil - } - // otherwise, check badger store - var exists bool - err := h.db.View(operation.BlockExists(blockID, &exists)) - if err != nil { - return false, fmt.Errorf("could not check existence: %w", err) - } - return exists, nil -} - -// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized -// version of `ByHeight` that skips retrieving the block. Expected errors during normal operations: -// - `storage.ErrNotFound` if no finalized block is known at given height. -func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := h.retrieveIdByHeightTx(height)(tx) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not lookup block id by height %d: %w", height, err) - } - return blockID, nil -} - -func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { - var blockIDs flow.IdentifierList - err := h.db.View(procedure.LookupBlockChildren(parentID, &blockIDs)) - if err != nil { - return nil, fmt.Errorf("could not look up children: %w", err) - } - headers := make([]*flow.Header, 0, len(blockIDs)) - for _, blockID := range blockIDs { - header, err := h.ByBlockID(blockID) - if err != nil { - return nil, fmt.Errorf("could not retrieve child (%x): %w", blockID, err) - } - headers = append(headers, header) - } - return headers, nil -} - -func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error) { - blocks := make([]flow.Header, 0, 1) - err := h.db.View(operation.FindHeaders(filter, &blocks)) - return blocks, err -} - -// RollbackExecutedBlock updates the executed block header to the given header. -// Only useful for the execution node to roll back the executed block height. -func (h *Headers) RollbackExecutedBlock(header *flow.Header) error { - return operation.RetryOnConflict(h.db.Update, func(txn *badger.Txn) error { - var blockID flow.Identifier - err := operation.RetrieveExecutedBlock(&blockID)(txn) - if err != nil { - return fmt.Errorf("cannot lookup executed block: %w", err) - } - - var highest flow.Header - err = operation.RetrieveHeader(blockID, &highest)(txn) - if err != nil { - return fmt.Errorf("cannot retrieve executed header: %w", err) - } - - // only rollback if the given height is below the current executed height - if header.Height >= highest.Height { - return fmt.Errorf("cannot rollback: 
expect the target height %v to be lower than highest executed height %v, but actually is not", - header.Height, highest.Height, - ) - } - - err = operation.UpdateExecutedBlock(header.ID())(txn) - if err != nil { - return fmt.Errorf("cannot update highest executed block: %w", err) - } - - return nil - }) -} diff --git a/storage/badger/headers_test.go b/storage/badger/headers_test.go deleted file mode 100644 index 5dc07775859..00000000000 --- a/storage/badger/headers_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/onflow/flow-go/storage/badger/operation" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestHeaderStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, db) - - block := unittest.BlockFixture() - - // store header - err := headers.Store(block.Header) - require.NoError(t, err) - - // index the header - err = operation.RetryOnConflict(db.Update, operation.IndexBlockHeight(block.Header.Height, block.ID())) - require.NoError(t, err) - - // retrieve header by height - actual, err := headers.ByHeight(block.Header.Height) - require.NoError(t, err) - require.Equal(t, block.Header, actual) - }) -} - -func TestHeaderRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, db) - - header := unittest.BlockHeaderFixture() - - // retrieve header by height, should err as not store before height - _, err := headers.ByHeight(header.Height) - require.ErrorIs(t, err, storage.ErrNotFound) - }) -} diff --git a/storage/badger/index.go b/storage/badger/index.go deleted file mode 100644 index fd8aa75e813..00000000000 --- a/storage/badger/index.go +++ /dev/null @@ -1,67 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Index implements a simple read-only payload storage around a badger DB. 
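As a side note on the `Headers.BlockIDByHeight` contract documented above (its only expected failure is `storage.ErrNotFound` when no block has been finalized at the queried height), a caller that treats that error as benign might look roughly like the sketch below. `isFinalizedAtHeight` is a hypothetical illustration and not part of this change; it assumes it lives alongside the `Headers` implementation shown above.

```go
// isFinalizedAtHeight is a hypothetical caller of Headers.BlockIDByHeight.
// It reports whether blockID is the finalized block at the given height.
// storage.ErrNotFound only means that no block is finalized at this height yet,
// which is benign here; any other error is unexpected and propagated.
func isFinalizedAtHeight(headers *Headers, blockID flow.Identifier, height uint64) (bool, error) {
	finalizedID, err := headers.BlockIDByHeight(height)
	if errors.Is(err, storage.ErrNotFound) {
		return false, nil // nothing finalized at this height yet
	}
	if err != nil {
		return false, fmt.Errorf("could not look up finalized block at height %d: %w", height, err)
	}
	return finalizedID == blockID, nil
}
```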
-type Index struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.Index] -} - -func NewIndex(collector module.CacheMetrics, db *badger.DB) *Index { - - store := func(blockID flow.Identifier, index *flow.Index) func(*transaction.Tx) error { - return transaction.WithTx(procedure.InsertIndex(blockID, index)) - } - - retrieve := func(blockID flow.Identifier) func(tx *badger.Txn) (*flow.Index, error) { - var index flow.Index - return func(tx *badger.Txn) (*flow.Index, error) { - err := procedure.RetrieveIndex(blockID, &index)(tx) - return &index, err - } - } - - p := &Index{ - db: db, - cache: newCache[flow.Identifier, *flow.Index](collector, metrics.ResourceIndex, - withLimit[flow.Identifier, *flow.Index](flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return p -} - -func (i *Index) storeTx(blockID flow.Identifier, index *flow.Index) func(*transaction.Tx) error { - return i.cache.PutTx(blockID, index) -} - -func (i *Index) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Index, error) { - return func(tx *badger.Txn) (*flow.Index, error) { - val, err := i.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -func (i *Index) Store(blockID flow.Identifier, index *flow.Index) error { - return operation.RetryOnConflictTx(i.db, transaction.Update, i.storeTx(blockID, index)) -} - -func (i *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { - tx := i.db.NewTransaction(false) - defer tx.Discard() - return i.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/index_test.go b/storage/badger/index_test.go deleted file mode 100644 index 1051e95f5b0..00000000000 --- a/storage/badger/index_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestIndexStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewIndex(metrics, db) - - blockID := unittest.IdentifierFixture() - expected := unittest.IndexFixture() - - // retreive without store - _, err := store.ByBlockID(blockID) - require.ErrorIs(t, err, storage.ErrNotFound) - - // store index - err = store.Store(blockID, expected) - require.NoError(t, err) - - // retreive index - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/init.go b/storage/badger/init.go index 31b01741523..83499c3dfc2 100644 --- a/storage/badger/init.go +++ b/storage/badger/init.go @@ -1,10 +1,18 @@ package badger +// TODO(leo): rename to open.go + import ( + "errors" "fmt" + "io/fs" + "os" + "path/filepath" + "strings" "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/storage/badger/operation" ) @@ -13,8 +21,7 @@ import ( // return an error. Once a database type marker has been set using these methods, // the type cannot be changed. func InitPublic(opts badger.Options) (*badger.DB, error) { - - db, err := badger.Open(opts) + db, err := SafeOpen(opts) if err != nil { return nil, fmt.Errorf("could not open db: %w", err) } @@ -34,8 +41,7 @@ func InitPublic(opts badger.Options) (*badger.DB, error) { // return an error. 
Once a database type marker has been set using these methods, // the type cannot be changed. func InitSecret(opts badger.Options) (*badger.DB, error) { - - db, err := badger.Open(opts) + db, err := SafeOpen(opts) if err != nil { return nil, fmt.Errorf("could not open db: %w", err) } @@ -49,3 +55,93 @@ func InitSecret(opts badger.Options) (*badger.DB, error) { return db, nil } + +func IsBadgerFolder(dataDir string) (bool, error) { + // Check if the directory exists + info, err := os.Stat(dataDir) + if err != nil { + return false, err + } + if !info.IsDir() { + return false, errors.New("provided path is not a directory") + } + + // Flags to indicate presence of key BadgerDB files + var hasKeyRegistry, hasVLOG, hasManifest bool + + err = filepath.WalkDir(dataDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + if d.IsDir() { + return nil + } + + name := d.Name() + switch { + case strings.HasSuffix(name, ".vlog"): + hasVLOG = true + case name == "KEYREGISTRY": + hasKeyRegistry = true + case name == "MANIFEST": + hasManifest = true + } + + // Short-circuit once we know it's a Badger folder + if hasKeyRegistry && hasVLOG && hasManifest { + return fs.SkipDir + } + return nil + }) + + if err != nil && !errors.Is(err, fs.SkipDir) { + return false, err + } + + isBadger := hasKeyRegistry && hasVLOG && hasManifest + return isBadger, nil +} + +// EnsureBadgerFolder ensures the given directory is either empty (including does not exist), +// or is a valid Badger folder. It returns an error if the directory exists and is not a Badger folder. +func EnsureBadgerFolder(dataDir string) error { + ok, err := util.IsEmptyOrNotExists(dataDir) + if err != nil { + return fmt.Errorf("error checking if folder is empty or does not exist: %w", err) + } + + // if the folder is empty or does not exist, then it can be used as a Badger folder + if ok { + return nil + } + + isBadger, err := IsBadgerFolder(dataDir) + if err != nil { + return fmt.Errorf("error checking if folder is a Badger folder: %w", err) + } + if !isBadger { + return fmt.Errorf("folder %s is not a Badger folder", dataDir) + } + return nil +} + +// SafeOpen opens a Badger database with the provided options, ensuring that the +// directory is a valid Badger folder. If the directory is not valid, it returns an error. +// This is useful to prevent accidental opening of a non-Badger (pebble) directory as a Badger database, +// which could wipe out the existing data. 
+func SafeOpen(opts badger.Options) (*badger.DB, error) { + // Check if the directory is a Badger folder + err := EnsureBadgerFolder(opts.Dir) + if err != nil { + return nil, fmt.Errorf("could not assert badger folder: %w", err) + } + + // Open the database + db, err := badger.Open(opts) + if err != nil { + return nil, fmt.Errorf("could not open db: %w", err) + } + + return db, nil +} diff --git a/storage/badger/init_test.go b/storage/badger/init_test.go index 7392babce41..99c96959ece 100644 --- a/storage/badger/init_test.go +++ b/storage/badger/init_test.go @@ -3,6 +3,7 @@ package badger_test import ( "testing" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" @@ -54,3 +55,30 @@ func TestEncryptionKeyMismatch(t *testing.T) { require.Error(t, err) }) } + +func TestIsBadgerFolder(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + ok, err := bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.False(t, ok) + + db := unittest.BadgerDB(t, dir) + ok, err = bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.True(t, ok) + require.NoError(t, db.Close()) + }) +} + +func TestPebbleIsNotBadgerFolder(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + db, err := pebble.Open(dir, &pebble.Options{}) + require.NoError(t, err) + + ok, err := bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.False(t, ok) + + require.NoError(t, db.Close()) + }) +} diff --git a/storage/badger/operation/approvals.go b/storage/badger/operation/approvals.go deleted file mode 100644 index 8a994eed2a2..00000000000 --- a/storage/badger/operation/approvals.go +++ /dev/null @@ -1,31 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertResultApproval inserts a ResultApproval by ID. -func InsertResultApproval(approval *flow.ResultApproval) func(*badger.Txn) error { - return insert(makePrefix(codeResultApproval, approval.ID()), approval) -} - -// RetrieveResultApproval retrieves an approval by ID. -func RetrieveResultApproval(approvalID flow.Identifier, approval *flow.ResultApproval) func(*badger.Txn) error { - return retrieve(makePrefix(codeResultApproval, approvalID), approval) -} - -// IndexResultApproval inserts a ResultApproval ID keyed by ExecutionResult ID -// and chunk index. If a value for this key exists, a storage.ErrAlreadyExists -// error is returned. This operation is only used by the ResultApprovals store, -// which is only used within a Verification node, where it is assumed that there -// is only one approval per chunk. -func IndexResultApproval(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) -} - -// LookupResultApproval finds a ResultApproval by result ID and chunk index. 
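For context on the `SafeOpen` / `EnsureBadgerFolder` guard added to `storage/badger/init.go` above: the point is that a data directory previously written by a Pebble-based node must be rejected rather than silently re-initialized as an empty Badger database. A minimal sketch of a caller is shown below; `openProtocolDB` and the options used are hypothetical and only illustrate the intended flow, since `InitPublic` now routes through `SafeOpen`.

```go
// openProtocolDB is a hypothetical helper illustrating the intended use of the new guard:
// opening a directory that is not empty and not a valid Badger folder must fail early,
// before any Badger files are created on top of the existing data.
func openProtocolDB(dataDir string) (*badger.DB, error) {
	opts := badger.DefaultOptions(dataDir).WithLogger(nil)

	// InitPublic calls SafeOpen internally, so a non-Badger (e.g. Pebble) directory is rejected.
	db, err := InitPublic(opts)
	if err != nil {
		return nil, fmt.Errorf("refusing to open %s as a Badger database: %w", dataDir, err)
	}
	return db, nil
}
```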
-func LookupResultApproval(resultID flow.Identifier, chunkIndex uint64, approvalID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) -} diff --git a/storage/badger/operation/bft.go b/storage/badger/operation/bft.go deleted file mode 100644 index 1c9d3c25d47..00000000000 --- a/storage/badger/operation/bft.go +++ /dev/null @@ -1,42 +0,0 @@ -package operation - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -// PurgeBlocklist removes the set of blocked node IDs from the database. -// If no corresponding entry exists, this function is a no-op. -// No errors are expected during normal operations. -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func PurgeBlocklist() func(*badger.Txn) error { - return func(tx *badger.Txn) error { - err := remove(makePrefix(disallowedNodeIDs))(tx) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("unexpected error while purging blocklist: %w", err) - } - return nil - } -} - -// PersistBlocklist writes the set of blocked node IDs into the database. -// If an entry already exists, it is overwritten; otherwise a new entry is created. -// No errors are expected during normal operations. -// -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func PersistBlocklist(blocklist map[flow.Identifier]struct{}) func(*badger.Txn) error { - return upsert(makePrefix(disallowedNodeIDs), blocklist) -} - -// RetrieveBlocklist reads the set of blocked node IDs from the database. -// Returns `storage.ErrNotFound` error in case no respective database entry is present.
-// -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func RetrieveBlocklist(blocklist *map[flow.Identifier]struct{}) func(*badger.Txn) error { - return retrieve(makePrefix(disallowedNodeIDs), blocklist) -} diff --git a/storage/badger/operation/bft_test.go b/storage/badger/operation/bft_test.go deleted file mode 100644 index f1b573659fc..00000000000 --- a/storage/badger/operation/bft_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -// Test_PersistBlocklist tests the operations: -// - PersistBlocklist(blocklist map[flow.Identifier]struct{}) -// - RetrieveBlocklist(blocklist *map[flow.Identifier]struct{}) -// - PurgeBlocklist() -func Test_PersistBlocklist(t *testing.T) { - t.Run("Retrieving non-existing blocklist should return 'storage.ErrNotFound'", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var blocklist map[flow.Identifier]struct{} - err := db.View(RetrieveBlocklist(&blocklist)) - require.ErrorIs(t, err, storage.ErrNotFound) - - }) - }) - - t.Run("Persisting and read blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist)) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist, b) - }) - }) - - t.Run("Overwrite blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist1 := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist1)) - require.NoError(t, err) - - blocklist2 := unittest.IdentifierListFixture(8).Lookup() - err = db.Update(PersistBlocklist(blocklist2)) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist2, b) - }) - }) - - t.Run("Write & Purge & Write blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist1 := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist1)) - require.NoError(t, err) - - err = db.Update(PurgeBlocklist()) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - - blocklist2 := unittest.IdentifierListFixture(8).Lookup() - err = db.Update(PersistBlocklist(blocklist2)) - require.NoError(t, err) - - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist2, b) - }) - }) - - t.Run("Purge non-existing blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var b map[flow.Identifier]struct{} - - err := db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - - err = db.Update(PurgeBlocklist()) - require.NoError(t, err) - - err = db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - }) - }) -} diff --git a/storage/badger/operation/children.go b/storage/badger/operation/children.go deleted file mode 100644 index 92eb0c35918..00000000000 --- a/storage/badger/operation/children.go +++ /dev/null @@ -1,22 +0,0 @@ -package operation - -import ( - 
"github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertBlockChildren insert an index to lookup the direct child of a block by its ID -func InsertBlockChildren(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error { - return insert(makePrefix(codeBlockChildren, blockID), childrenIDs) -} - -// UpdateBlockChildren updates the children for a block. -func UpdateBlockChildren(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error { - return update(makePrefix(codeBlockChildren, blockID), childrenIDs) -} - -// RetrieveBlockChildren the child block ID by parent block ID -func RetrieveBlockChildren(blockID flow.Identifier, childrenIDs *flow.IdentifierList) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockChildren, blockID), childrenIDs) -} diff --git a/storage/badger/operation/children_test.go b/storage/badger/operation/children_test.go deleted file mode 100644 index 629488373aa..00000000000 --- a/storage/badger/operation/children_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestBlockChildrenIndexUpdateLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := unittest.IdentifierFixture() - childrenIDs := unittest.IdentifierListFixture(8) - var retrievedIDs flow.IdentifierList - - err := db.Update(InsertBlockChildren(blockID, childrenIDs)) - require.NoError(t, err) - err = db.View(RetrieveBlockChildren(blockID, &retrievedIDs)) - require.NoError(t, err) - assert.Equal(t, childrenIDs, retrievedIDs) - - altIDs := unittest.IdentifierListFixture(4) - err = db.Update(UpdateBlockChildren(blockID, altIDs)) - require.NoError(t, err) - err = db.View(RetrieveBlockChildren(blockID, &retrievedIDs)) - require.NoError(t, err) - assert.Equal(t, altIDs, retrievedIDs) - }) -} diff --git a/storage/badger/operation/chunk_locators.go b/storage/badger/operation/chunk_locators.go deleted file mode 100644 index ef7f11fec50..00000000000 --- a/storage/badger/operation/chunk_locators.go +++ /dev/null @@ -1,16 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/chunks" - "github.com/onflow/flow-go/model/flow" -) - -func InsertChunkLocator(locator *chunks.Locator) func(*badger.Txn) error { - return insert(makePrefix(codeChunk, locator.ID()), locator) -} - -func RetrieveChunkLocator(locatorID flow.Identifier, locator *chunks.Locator) func(*badger.Txn) error { - return retrieve(makePrefix(codeChunk, locatorID), locator) -} diff --git a/storage/badger/operation/cluster.go b/storage/badger/operation/cluster.go deleted file mode 100644 index 8163285c62f..00000000000 --- a/storage/badger/operation/cluster.go +++ /dev/null @@ -1,83 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// This file implements storage functions for chain state book-keeping of -// collection node cluster consensus. In contrast to the corresponding functions -// for regular consensus, these functions include the cluster ID in order to -// support storing multiple chains, for example during epoch switchover. - -// IndexClusterBlockHeight inserts a block number to block ID mapping for -// the given cluster. 
-func IndexClusterBlockHeight(clusterID flow.ChainID, number uint64, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeFinalizedCluster, clusterID, number), blockID) -} - -// LookupClusterBlockHeight retrieves a block ID by number for the given cluster -func LookupClusterBlockHeight(clusterID flow.ChainID, number uint64, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeFinalizedCluster, clusterID, number), blockID) -} - -// InsertClusterFinalizedHeight inserts the finalized boundary for the given cluster. -func InsertClusterFinalizedHeight(clusterID flow.ChainID, number uint64) func(*badger.Txn) error { - return insert(makePrefix(codeClusterHeight, clusterID), number) -} - -// UpdateClusterFinalizedHeight updates the finalized boundary for the given cluster. -func UpdateClusterFinalizedHeight(clusterID flow.ChainID, number uint64) func(*badger.Txn) error { - return update(makePrefix(codeClusterHeight, clusterID), number) -} - -// RetrieveClusterFinalizedHeight retrieves the finalized boundary for the given cluster. -func RetrieveClusterFinalizedHeight(clusterID flow.ChainID, number *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeClusterHeight, clusterID), number) -} - -// IndexReferenceBlockByClusterBlock inserts the reference block ID for the given -// cluster block ID. While each cluster block specifies a reference block in its -// payload, we maintain this additional lookup for performance reasons. -func IndexReferenceBlockByClusterBlock(clusterBlockID, refID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID) -} - -// LookupReferenceBlockByClusterBlock looks up the reference block ID for the given -// cluster block ID. While each cluster block specifies a reference block in its -// payload, we maintain this additional lookup for performance reasons. -func LookupReferenceBlockByClusterBlock(clusterBlockID flow.Identifier, refID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID) -} - -// IndexClusterBlockByReferenceHeight indexes a cluster block ID by its reference -// block height. The cluster block ID is included in the key for more efficient -// traversal. Only finalized cluster blocks should be included in this index. -// The key is structured as: prefix | reference block height (big-endian) | cluster block ID. -func IndexClusterBlockByReferenceHeight(refHeight uint64, clusterBlockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeRefHeightToClusterBlock, refHeight, clusterBlockID), nil) -} - -// LookupClusterBlocksByReferenceHeightRange traverses the ref_height->cluster_block -// index and returns any finalized cluster blocks which have a reference block with -// height in the given range. This is used to avoid including duplicate transactions -// when building or validating a new collection.
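To make the purpose of this reference-height index concrete, the sketch below shows the kind of dedup check it enables when building a new collection (the range lookup itself follows right after this note). `collectPotentialDuplicates` is a hypothetical caller, not code from this change; it only assumes the `LookupClusterBlocksByReferenceHeightRange` operation defined here.

```go
// collectPotentialDuplicates is a hypothetical sketch of how a collection builder
// can use the ref_height -> cluster_block index: any finalized cluster block whose
// reference block height falls within the candidate's expiry window may contain
// transactions that must not be included again.
func collectPotentialDuplicates(db *badger.DB, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) {
	var clusterBlockIDs []flow.Identifier
	err := db.View(LookupClusterBlocksByReferenceHeightRange(minRefHeight, maxRefHeight, &clusterBlockIDs))
	if err != nil {
		return nil, fmt.Errorf("could not look up cluster blocks in ref height range [%d, %d]: %w",
			minRefHeight, maxRefHeight, err)
	}
	// The caller would next load each block's collection payload and build a set of
	// already-included transaction IDs to filter the new collection against.
	return clusterBlockIDs, nil
}
```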
-func LookupClusterBlocksByReferenceHeightRange(start, end uint64, clusterBlockIDs *[]flow.Identifier) func(*badger.Txn) error { - startPrefix := makePrefix(codeRefHeightToClusterBlock, start) - endPrefix := makePrefix(codeRefHeightToClusterBlock, end) - prefixLen := len(startPrefix) - - return iterate(startPrefix, endPrefix, func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - clusterBlockIDBytes := key[prefixLen:] - var clusterBlockID flow.Identifier - copy(clusterBlockID[:], clusterBlockIDBytes) - *clusterBlockIDs = append(*clusterBlockIDs, clusterBlockID) - - // the info we need is stored in the key, never process the value - return false - } - return check, nil, nil - }, withPrefetchValuesFalse) -} diff --git a/storage/badger/operation/cluster_test.go b/storage/badger/operation/cluster_test.go deleted file mode 100644 index e01d7f24843..00000000000 --- a/storage/badger/operation/cluster_test.go +++ /dev/null @@ -1,312 +0,0 @@ -package operation_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestClusterHeights(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var ( - clusterID flow.ChainID = "cluster" - height uint64 = 42 - expected = unittest.IdentifierFixture() - err error - ) - - t.Run("retrieve non-existent", func(t *testing.T) { - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - t.Log(err) - assert.ErrorIs(t, err, storage.ErrNotFound) - }) - - t.Run("insert/retrieve", func(t *testing.T) { - err = db.Update(operation.IndexClusterBlockHeight(clusterID, height, expected)) - assert.NoError(t, err) - - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.NoError(t, err) - assert.Equal(t, expected, actual) - }) - - t.Run("multiple chain IDs", func(t *testing.T) { - for i := 0; i < 3; i++ { - // use different cluster ID but same block height - clusterID = flow.ChainID(fmt.Sprintf("cluster-%d", i)) - expected = unittest.IdentifierFixture() - - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.ErrorIs(t, err, storage.ErrNotFound) - - err = db.Update(operation.IndexClusterBlockHeight(clusterID, height, expected)) - assert.NoError(t, err) - - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.NoError(t, err) - assert.Equal(t, expected, actual) - } - }) - }) -} - -func TestClusterBoundaries(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var ( - clusterID flow.ChainID = "cluster" - expected uint64 = 42 - err error - ) - - t.Run("retrieve non-existant", func(t *testing.T) { - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - t.Log(err) - assert.ErrorIs(t, err, storage.ErrNotFound) - }) - - t.Run("insert/retrieve", func(t *testing.T) { - err = db.Update(operation.InsertClusterFinalizedHeight(clusterID, 21)) - assert.NoError(t, err) - - err = db.Update(operation.UpdateClusterFinalizedHeight(clusterID, expected)) - assert.NoError(t, err) - - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - 
assert.NoError(t, err) - assert.Equal(t, expected, actual) - }) - - t.Run("multiple chain IDs", func(t *testing.T) { - for i := 0; i < 3; i++ { - // use different cluster ID but same boundary - clusterID = flow.ChainID(fmt.Sprintf("cluster-%d", i)) - expected = uint64(i) - - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - assert.ErrorIs(t, err, storage.ErrNotFound) - - err = db.Update(operation.InsertClusterFinalizedHeight(clusterID, expected)) - assert.NoError(t, err) - - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - assert.NoError(t, err) - assert.Equal(t, expected, actual) - } - }) - }) -} - -func TestClusterBlockByReferenceHeight(t *testing.T) { - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to index cluster block by reference height", func(t *testing.T) { - id := unittest.IdentifierFixture() - height := rand.Uint64() - err := db.Update(operation.IndexClusterBlockByReferenceHeight(height, id)) - assert.NoError(t, err) - - var retrieved []flow.Identifier - err = db.View(operation.LookupClusterBlocksByReferenceHeightRange(height, height, &retrieved)) - assert.NoError(t, err) - require.Len(t, retrieved, 1) - assert.Equal(t, id, retrieved[0]) - }) - }) - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to index multiple cluster blocks at same reference height", func(t *testing.T) { - ids := unittest.IdentifierListFixture(10) - height := rand.Uint64() - for _, id := range ids { - err := db.Update(operation.IndexClusterBlockByReferenceHeight(height, id)) - assert.NoError(t, err) - } - - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(height, height, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, len(ids)) - assert.ElementsMatch(t, ids, retrieved) - }) - }) - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to lookup cluster blocks across height range", func(t *testing.T) { - ids := unittest.IdentifierListFixture(100) - nextHeight := rand.Uint64() - // keep track of height range - minHeight, maxHeight := nextHeight, nextHeight - // keep track of which ids are indexed at each nextHeight - lookup := make(map[uint64][]flow.Identifier) - - for i := 0; i < len(ids); i++ { - // randomly adjust the nextHeight, increasing on average - r := rand.Intn(100) - if r < 20 { - nextHeight -= 1 // 20% - } else if r < 40 { - // nextHeight stays the same - 20% - } else if r < 80 { - nextHeight += 1 // 40% - } else { - nextHeight += 2 // 20% - } - - lookup[nextHeight] = append(lookup[nextHeight], ids[i]) - if nextHeight < minHeight { - minHeight = nextHeight - } - if nextHeight > maxHeight { - maxHeight = nextHeight - } - - err := db.Update(operation.IndexClusterBlockByReferenceHeight(nextHeight, ids[i])) - assert.NoError(t, err) - } - - // determine which ids we expect to be retrieved for a given height range - idsInHeightRange := func(min, max uint64) []flow.Identifier { - var idsForHeight []flow.Identifier - for height, id := range lookup { - if min <= height && height <= max { - idsForHeight = append(idsForHeight, id...) 
- } - } - return idsForHeight - } - - // Test cases are described as follows: - // {---} represents the queried height range - // [---] represents the indexed height range - // [{ means the left endpoint of both ranges are the same - // {-[ means the left endpoint of the queried range is strictly less than the indexed range - t.Run("{-}--[-]", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(minHeight-100, minHeight-1, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, 0) - }) - t.Run("{-[--}-]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight - 100 - max := minHeight + (maxHeight-minHeight)/2 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("{[--}--]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight - max := minHeight + (maxHeight-minHeight)/2 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-{--}-]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + 1 - max := maxHeight - 1 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[{----}]", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(minHeight, maxHeight, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(minHeight, maxHeight) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[--{--}]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + (maxHeight-minHeight)/2 - max := maxHeight - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-{--]-}", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + (maxHeight-minHeight)/2 - max := maxHeight + 100 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-]--{-}", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(maxHeight+1, maxHeight+100, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, 0) - }) - }) - }) -} - -// expected average case # of blocks to lookup on Mainnet -func 
BenchmarkLookupClusterBlocksByReferenceHeightRange_1200(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 1200) -} - -// 5x average case on Mainnet -func BenchmarkLookupClusterBlocksByReferenceHeightRange_6_000(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 6_000) -} - -func BenchmarkLookupClusterBlocksByReferenceHeightRange_100_000(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 100_000) -} - -func benchmarkLookupClusterBlocksByReferenceHeightRange(b *testing.B, n int) { - unittest.RunWithBadgerDB(b, func(db *badger.DB) { - for i := 0; i < n; i++ { - err := db.Update(operation.IndexClusterBlockByReferenceHeight(rand.Uint64()%1000, unittest.IdentifierFixture())) - require.NoError(b, err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - var blockIDs []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(0, 1000, &blockIDs)) - require.NoError(b, err) - } - }) -} diff --git a/storage/badger/operation/collections.go b/storage/badger/operation/collections.go deleted file mode 100644 index 3f6c22abd68..00000000000 --- a/storage/badger/operation/collections.go +++ /dev/null @@ -1,44 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// NOTE: These insert light collections, which only contain references -// to the constituent transactions. They do not modify transactions contained -// by the collections. - -func InsertCollection(collection *flow.LightCollection) func(*badger.Txn) error { - return insert(makePrefix(codeCollection, collection.ID()), collection) -} - -func RetrieveCollection(collID flow.Identifier, collection *flow.LightCollection) func(*badger.Txn) error { - return retrieve(makePrefix(codeCollection, collID), collection) -} - -func RemoveCollection(collID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeCollection, collID)) -} - -// IndexCollectionPayload indexes the transactions within the collection payload -// of a cluster block. -func IndexCollectionPayload(blockID flow.Identifier, txIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexCollection, blockID), txIDs) -} - -// LookupCollection looks up the collection for a given cluster payload. 
-func LookupCollectionPayload(blockID flow.Identifier, txIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexCollection, blockID), txIDs) -} - -// IndexCollectionByTransaction inserts a collection id keyed by a transaction id -func IndexCollectionByTransaction(txID flow.Identifier, collectionID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexCollectionByTransaction, txID), collectionID) -} - -// LookupCollectionID retrieves a collection id by transaction id -func RetrieveCollectionID(txID flow.Identifier, collectionID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexCollectionByTransaction, txID), collectionID) -} diff --git a/storage/badger/operation/collections_test.go b/storage/badger/operation/collections_test.go deleted file mode 100644 index cec97a71a74..00000000000 --- a/storage/badger/operation/collections_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestCollections(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.CollectionFixture(2).Light() - - t.Run("Retrieve nonexistant", func(t *testing.T) { - var actual flow.LightCollection - err := db.View(RetrieveCollection(expected.ID(), &actual)) - assert.Error(t, err) - }) - - t.Run("Save", func(t *testing.T) { - err := db.Update(InsertCollection(&expected)) - require.NoError(t, err) - - var actual flow.LightCollection - err = db.View(RetrieveCollection(expected.ID(), &actual)) - assert.NoError(t, err) - - assert.Equal(t, expected, actual) - }) - - t.Run("Remove", func(t *testing.T) { - err := db.Update(RemoveCollection(expected.ID())) - require.NoError(t, err) - - var actual flow.LightCollection - err = db.View(RetrieveCollection(expected.ID(), &actual)) - assert.Error(t, err) - }) - - t.Run("Index and lookup", func(t *testing.T) { - expected := unittest.CollectionFixture(1).Light() - blockID := unittest.IdentifierFixture() - - _ = db.Update(func(tx *badger.Txn) error { - err := InsertCollection(&expected)(tx) - assert.NoError(t, err) - err = IndexCollectionPayload(blockID, expected.Transactions)(tx) - assert.NoError(t, err) - return nil - }) - - var actual flow.LightCollection - err := db.View(LookupCollectionPayload(blockID, &actual.Transactions)) - assert.NoError(t, err) - - assert.Equal(t, expected, actual) - }) - - t.Run("Index and lookup by transaction ID", func(t *testing.T) { - expected := unittest.IdentifierFixture() - transactionID := unittest.IdentifierFixture() - actual := flow.Identifier{} - - _ = db.Update(func(tx *badger.Txn) error { - err := IndexCollectionByTransaction(transactionID, expected)(tx) - assert.NoError(t, err) - err = RetrieveCollectionID(transactionID, &actual)(tx) - assert.NoError(t, err) - return nil - }) - assert.Equal(t, expected, actual) - }) - }) -} diff --git a/storage/badger/operation/common.go b/storage/badger/operation/common.go index e1bca789d2b..c1deb1b7f5a 100644 --- a/storage/badger/operation/common.go +++ b/storage/badger/operation/common.go @@ -13,37 +13,6 @@ import ( "github.com/onflow/flow-go/storage" ) -// batchWrite will encode the given entity using msgpack and will upsert the resulting -// binary data in the badger wrote batch under the provided key - if the value already exists -// in the database 
it will be overridden. -// No errors are expected during normal operation. -func batchWrite(key []byte, entity interface{}) func(writeBatch *badger.WriteBatch) error { - return func(writeBatch *badger.WriteBatch) error { - - // update the maximum key size if the inserted key is bigger - if uint32(len(key)) > max { - max = uint32(len(key)) - err := SetMax(writeBatch) - if err != nil { - return fmt.Errorf("could not update max tracker: %w", err) - } - } - - // serialize the entity data - val, err := msgpack.Marshal(entity) - if err != nil { - return irrecoverable.NewExceptionf("could not encode entity: %w", err) - } - - // persist the entity data into the DB - err = writeBatch.Set(key, val) - if err != nil { - return irrecoverable.NewExceptionf("could not store data: %w", err) - } - return nil - } -} - // insert will encode the given entity using msgpack and will insert the resulting // binary data in the badger DB under the provided key. It will error if the // key already exists. @@ -175,19 +144,6 @@ func remove(key []byte) func(*badger.Txn) error { } } -// batchRemove removes entry under a given key in a write-batch. -// if key doesn't exist, does nothing. -// No errors are expected during normal operation. -func batchRemove(key []byte) func(writeBatch *badger.WriteBatch) error { - return func(writeBatch *badger.WriteBatch) error { - err := writeBatch.Delete(key) - if err != nil { - return irrecoverable.NewExceptionf("could not batch delete data: %w", err) - } - return nil - } -} - // removeByPrefix removes all the entities if the prefix of the key matches the given prefix. // if no key matches, this is a no-op // No errors are expected during normal operation. @@ -308,14 +264,6 @@ func lookup(entityIDs *[]flow.Identifier) func() (checkFunc, createFunc, handleF } } -// withPrefetchValuesFalse configures a Badger iteration to NOT preemptively load -// the values when iterating over keys (ie. key-only iteration). Key-only iteration -// is several order of magnitudes faster than regular iteration, because it involves -// access to the LSM-tree only, which is usually resident entirely in RAM. -func withPrefetchValuesFalse(options *badger.IteratorOptions) { - options.PrefetchValues = false -} - // iterate iterates over a range of keys defined by a start and end key. The // start key may be higher than the end key, in which case we iterate in // reverse order. diff --git a/storage/badger/operation/epoch.go b/storage/badger/operation/epoch.go deleted file mode 100644 index 841d7e05994..00000000000 --- a/storage/badger/operation/epoch.go +++ /dev/null @@ -1,30 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertEpochSetup(eventID flow.Identifier, event *flow.EpochSetup) func(*badger.Txn) error { - return insert(makePrefix(codeEpochSetup, eventID), event) -} - -func RetrieveEpochSetup(eventID flow.Identifier, event *flow.EpochSetup) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochSetup, eventID), event) -} - -func InsertEpochCommit(eventID flow.Identifier, event *flow.EpochCommit) func(*badger.Txn) error { - return insert(makePrefix(codeEpochCommit, eventID), event) -} - -// InsertEpochCommitV0 inserts an epoch commit event. This is used only in testing to verify that we have backward compatibility -// at storage layer. 
-// TODO(EFM, #6794): Remove this once we complete the network upgrade -func InsertEpochCommitV0(eventID flow.Identifier, event any) func(*badger.Txn) error { - return insert(makePrefix(codeEpochCommit, eventID), event) -} - -func RetrieveEpochCommit(eventID flow.Identifier, event *flow.EpochCommit) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochCommit, eventID), event) -} diff --git a/storage/badger/operation/epoch_protocol_state.go b/storage/badger/operation/epoch_protocol_state.go deleted file mode 100644 index a39ec1312b0..00000000000 --- a/storage/badger/operation/epoch_protocol_state.go +++ /dev/null @@ -1,39 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertEpochProtocolState inserts an epoch protocol state entry by ID. -// Error returns: -// - storage.ErrAlreadyExists if the key already exists in the database. -// - generic error in case of unexpected failure from the database layer or encoding failure. -func InsertEpochProtocolState(entryID flow.Identifier, entry *flow.MinEpochStateEntry) func(*badger.Txn) error { - return insert(makePrefix(codeEpochProtocolState, entryID), entry) -} - -// RetrieveEpochProtocolState retrieves an epoch protocol state entry by ID. -// Error returns: -// - storage.ErrNotFound if the key does not exist in the database -// - generic error in case of unexpected failure from the database layer -func RetrieveEpochProtocolState(entryID flow.Identifier, entry *flow.MinEpochStateEntry) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochProtocolState, entryID), entry) -} - -// IndexEpochProtocolState indexes an epoch protocol state entry by block ID. -// Error returns: -// - storage.ErrAlreadyExists if the key already exists in the database. -// - generic error in case of unexpected failure from the database layer or encoding failure. -func IndexEpochProtocolState(blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) -} - -// LookupEpochProtocolState finds an epoch protocol state entry ID by block ID. 
-// Error returns: -// - storage.ErrNotFound if the key does not exist in the database -// - generic error in case of unexpected failure from the database layer -func LookupEpochProtocolState(blockID flow.Identifier, epochProtocolStateEntryID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) -} diff --git a/storage/badger/operation/guarantees.go b/storage/badger/operation/guarantees.go deleted file mode 100644 index cfefead5f5b..00000000000 --- a/storage/badger/operation/guarantees.go +++ /dev/null @@ -1,23 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertGuarantee(collID flow.Identifier, guarantee *flow.CollectionGuarantee) func(*badger.Txn) error { - return insert(makePrefix(codeGuarantee, collID), guarantee) -} - -func RetrieveGuarantee(collID flow.Identifier, guarantee *flow.CollectionGuarantee) func(*badger.Txn) error { - return retrieve(makePrefix(codeGuarantee, collID), guarantee) -} - -func IndexPayloadGuarantees(blockID flow.Identifier, guarIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadGuarantees, blockID), guarIDs) -} - -func LookupPayloadGuarantees(blockID flow.Identifier, guarIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadGuarantees, blockID), guarIDs) -} diff --git a/storage/badger/operation/guarantees_test.go b/storage/badger/operation/guarantees_test.go deleted file mode 100644 index 3d7264d3aa8..00000000000 --- a/storage/badger/operation/guarantees_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/onflow/crypto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestGuaranteeInsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - g := unittest.CollectionGuaranteeFixture() - - err := db.Update(InsertGuarantee(g.CollectionID, g)) - require.NoError(t, err) - - var retrieved flow.CollectionGuarantee - err = db.View(RetrieveGuarantee(g.CollectionID, &retrieved)) - require.NoError(t, err) - - assert.Equal(t, g, &retrieved) - }) -} - -func TestIndexGuaranteedCollectionByBlockHashInsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := flow.Identifier{0x10} - collID1 := flow.Identifier{0x01} - collID2 := flow.Identifier{0x02} - guarantees := []*flow.CollectionGuarantee{ - {CollectionID: collID1, Signature: crypto.Signature{0x10}}, - {CollectionID: collID2, Signature: crypto.Signature{0x20}}, - } - expected := flow.GetIDs(guarantees) - - err := db.Update(func(tx *badger.Txn) error { - for _, guarantee := range guarantees { - if err := InsertGuarantee(guarantee.ID(), guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID, expected)(tx); err != nil { - return err - } - return nil - }) - require.NoError(t, err) - - var actual []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID, &actual)) - require.NoError(t, err) - - assert.Equal(t, []flow.Identifier{collID1, collID2}, actual) - }) -} - -func TestIndexGuaranteedCollectionByBlockHashMultipleBlocks(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID1 := flow.Identifier{0x10} - blockID2 := flow.Identifier{0x20} - collID1 := 
flow.Identifier{0x01} - collID2 := flow.Identifier{0x02} - collID3 := flow.Identifier{0x03} - collID4 := flow.Identifier{0x04} - set1 := []*flow.CollectionGuarantee{ - {CollectionID: collID1, Signature: crypto.Signature{0x1}}, - } - set2 := []*flow.CollectionGuarantee{ - {CollectionID: collID2, Signature: crypto.Signature{0x2}}, - {CollectionID: collID3, Signature: crypto.Signature{0x3}}, - {CollectionID: collID4, Signature: crypto.Signature{0x1}}, - } - ids1 := flow.GetIDs(set1) - ids2 := flow.GetIDs(set2) - - // insert block 1 - err := db.Update(func(tx *badger.Txn) error { - for _, guarantee := range set1 { - if err := InsertGuarantee(guarantee.CollectionID, guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID1, ids1)(tx); err != nil { - return err - } - return nil - }) - require.NoError(t, err) - - // insert block 2 - err = db.Update(func(tx *badger.Txn) error { - for _, guarantee := range set2 { - if err := InsertGuarantee(guarantee.CollectionID, guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID2, ids2)(tx); err != nil { - return err - } - return nil - }) - require.NoError(t, err) - - t.Run("should retrieve collections for block", func(t *testing.T) { - var actual1 []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID1, &actual1)) - assert.NoError(t, err) - assert.ElementsMatch(t, []flow.Identifier{collID1}, actual1) - - // get block 2 - var actual2 []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID2, &actual2)) - assert.NoError(t, err) - assert.Equal(t, []flow.Identifier{collID2, collID3, collID4}, actual2) - }) - }) -} diff --git a/storage/badger/operation/headers.go b/storage/badger/operation/headers.go deleted file mode 100644 index 0ab0dd6b1ac..00000000000 --- a/storage/badger/operation/headers.go +++ /dev/null @@ -1,75 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertHeader(headerID flow.Identifier, header *flow.Header) func(*badger.Txn) error { - return insert(makePrefix(codeHeader, headerID), header) -} - -func RetrieveHeader(blockID flow.Identifier, header *flow.Header) func(*badger.Txn) error { - return retrieve(makePrefix(codeHeader, blockID), header) -} - -// IndexBlockHeight indexes the height of a block. It should only be called on -// finalized blocks. -func IndexBlockHeight(height uint64, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeHeightToBlock, height), blockID) -} - -// LookupBlockHeight retrieves finalized blocks by height. -func LookupBlockHeight(height uint64, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeHeightToBlock, height), blockID) -} - -// BlockExists checks whether the block exists in the database. -// No errors are expected during normal operation. -func BlockExists(blockID flow.Identifier, blockExists *bool) func(*badger.Txn) error { - return exists(makePrefix(codeHeader, blockID), blockExists) -} - -func InsertExecutedBlock(blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeExecutedBlock), blockID) -} - -func UpdateExecutedBlock(blockID flow.Identifier) func(*badger.Txn) error { - return update(makePrefix(codeExecutedBlock), blockID) -} - -func RetrieveExecutedBlock(blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutedBlock), blockID) -} - -// IndexCollectionBlock indexes a block by a collection within that block. 
-func IndexCollectionBlock(collID flow.Identifier, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeCollectionBlock, collID), blockID) -} - -// LookupCollectionBlock looks up a block by a collection within that block. -func LookupCollectionBlock(collID flow.Identifier, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeCollectionBlock, collID), blockID) -} - -// FindHeaders iterates through all headers, calling `filter` on each, and adding -// them to the `found` slice if `filter` returned true -func FindHeaders(filter func(header *flow.Header) bool, found *[]flow.Header) func(*badger.Txn) error { - return traverse(makePrefix(codeHeader), func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - var val flow.Header - create := func() interface{} { - return &val - } - handle := func() error { - if filter(&val) { - *found = append(*found, val) - } - return nil - } - return check, create, handle - }) -} diff --git a/storage/badger/operation/headers_test.go b/storage/badger/operation/headers_test.go deleted file mode 100644 index aa646f665e7..00000000000 --- a/storage/badger/operation/headers_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package operation - -import ( - "testing" - "time" - - "github.com/dgraph-io/badger/v2" - "github.com/onflow/crypto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestHeaderInsertCheckRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := &flow.Header{ - View: 1337, - Timestamp: time.Now().UTC(), - ParentID: flow.Identifier{0x11}, - PayloadHash: flow.Identifier{0x22}, - ParentVoterIndices: []byte{0x44}, - ParentVoterSigData: []byte{0x88}, - ProposerID: flow.Identifier{0x33}, - ProposerSigData: crypto.Signature{0x77}, - } - blockID := expected.ID() - - err := db.Update(InsertHeader(expected.ID(), expected)) - require.NoError(t, err) - - var actual flow.Header - err = db.View(RetrieveHeader(blockID, &actual)) - require.NoError(t, err) - - assert.Equal(t, *expected, actual) - }) -} - -func TestHeaderIDIndexByCollectionID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - headerID := unittest.IdentifierFixture() - collectionID := unittest.IdentifierFixture() - - err := db.Update(IndexCollectionBlock(collectionID, headerID)) - require.NoError(t, err) - - actualID := &flow.Identifier{} - err = db.View(LookupCollectionBlock(collectionID, actualID)) - require.NoError(t, err) - assert.Equal(t, headerID, *actualID) - }) -} - -func TestBlockHeightIndexLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - height := uint64(1337) - expected := flow.Identifier{0x01, 0x02, 0x03} - - err := db.Update(IndexBlockHeight(height, expected)) - require.NoError(t, err) - - var actual flow.Identifier - err = db.View(LookupBlockHeight(height, &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/heights.go b/storage/badger/operation/heights.go deleted file mode 100644 index 9e4efe79c91..00000000000 --- a/storage/badger/operation/heights.go +++ /dev/null @@ -1,91 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" -) - -func InsertRootHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeFinalizedRootHeight), height) -} - -func RetrieveRootHeight(height *uint64) 
func(*badger.Txn) error { - return retrieve(makePrefix(codeFinalizedRootHeight), height) -} - -func InsertSealedRootHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeSealedRootHeight), height) -} - -func RetrieveSealedRootHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeSealedRootHeight), height) -} - -func InsertFinalizedHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeFinalizedHeight), height) -} - -func UpdateFinalizedHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeFinalizedHeight), height) -} - -func RetrieveFinalizedHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeFinalizedHeight), height) -} - -func InsertSealedHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeSealedHeight), height) -} - -func UpdateSealedHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeSealedHeight), height) -} - -func RetrieveSealedHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeSealedHeight), height) -} - -// InsertEpochFirstHeight inserts the height of the first block in the given epoch. -// The first block of an epoch E is the finalized block with view >= E.FirstView. -// Although we don't store the final height of an epoch, it can be inferred from this index. -// Returns storage.ErrAlreadyExists if the height has already been indexed. -func InsertEpochFirstHeight(epoch, height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeEpochFirstHeight, epoch), height) -} - -// RetrieveEpochFirstHeight retrieves the height of the first block in the given epoch. -// Returns storage.ErrNotFound if the first block of the epoch has not yet been finalized. -func RetrieveEpochFirstHeight(epoch uint64, height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochFirstHeight, epoch), height) -} - -// RetrieveEpochLastHeight retrieves the height of the last block in the given epoch. -// It's a more readable, but equivalent query to RetrieveEpochFirstHeight when interested in the last height of an epoch. -// Returns storage.ErrNotFound if the first block of the epoch has not yet been finalized. -func RetrieveEpochLastHeight(epoch uint64, height *uint64) func(*badger.Txn) error { - var nextEpochFirstHeight uint64 - return func(tx *badger.Txn) error { - if err := retrieve(makePrefix(codeEpochFirstHeight, epoch+1), &nextEpochFirstHeight)(tx); err != nil { - return err - } - *height = nextEpochFirstHeight - 1 - return nil - } -} - -// InsertLastCompleteBlockHeightIfNotExists inserts the last full block height if it is not already set. -// Calling this function multiple times is a no-op and returns no expected errors. 
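The epoch height helpers above derive an epoch's last height from the first height of the next epoch, so the two lookups compose into a height-range query. A hedged sketch under the assumption that both the epoch and its successor have started (otherwise `storage.ErrNotFound` is returned); the helper name is illustrative:

```go
package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/storage/badger/operation"
)

// epochHeightRange returns the [first, last] finalized height range of an epoch.
// Either lookup returns storage.ErrNotFound while the respective epoch (or its
// successor, for the last height) has not yet started.
func epochHeightRange(db *badger.DB, epoch uint64) (first uint64, last uint64, err error) {
	err = db.View(func(tx *badger.Txn) error {
		if err := operation.RetrieveEpochFirstHeight(epoch, &first)(tx); err != nil {
			return fmt.Errorf("could not get first height of epoch %d: %w", epoch, err)
		}
		if err := operation.RetrieveEpochLastHeight(epoch, &last)(tx); err != nil {
			return fmt.Errorf("could not get last height of epoch %d: %w", epoch, err)
		}
		return nil
	})
	return first, last, err
}
```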
-func InsertLastCompleteBlockHeightIfNotExists(height uint64) func(*badger.Txn) error { - return SkipDuplicates(InsertLastCompleteBlockHeight(height)) -} - -func InsertLastCompleteBlockHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeLastCompleteBlockHeight), height) -} - -func UpdateLastCompleteBlockHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeLastCompleteBlockHeight), height) -} - -func RetrieveLastCompleteBlockHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeLastCompleteBlockHeight), height) -} diff --git a/storage/badger/operation/heights_test.go b/storage/badger/operation/heights_test.go deleted file mode 100644 index 30ad8452c46..00000000000 --- a/storage/badger/operation/heights_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package operation - -import ( - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestFinalizedInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertFinalizedHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveFinalizedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - height = 9999 - err = db.Update(UpdateFinalizedHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveFinalizedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestSealedInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertSealedHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveSealedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - height = 9999 - err = db.Update(UpdateSealedHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveSealedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestEpochFirstBlockIndex_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := rand.Uint64() - epoch := rand.Uint64() - - // retrieve when empty errors - var retrieved uint64 - err := db.View(RetrieveEpochFirstHeight(epoch, &retrieved)) - require.ErrorIs(t, err, storage.ErrNotFound) - - // can insert - err = db.Update(InsertEpochFirstHeight(epoch, height)) - require.NoError(t, err) - - // can retrieve - err = db.View(RetrieveEpochFirstHeight(epoch, &retrieved)) - require.NoError(t, err) - assert.Equal(t, retrieved, height) - - // retrieve non-existent key errors - err = db.View(RetrieveEpochFirstHeight(epoch+1, &retrieved)) - require.ErrorIs(t, err, storage.ErrNotFound) - - // insert existent key errors - err = db.Update(InsertEpochFirstHeight(epoch, height)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - }) -} - -func TestLastCompleteBlockHeightInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertLastCompleteBlockHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - height = 9999 - err = 
db.Update(UpdateLastCompleteBlockHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestLastCompleteBlockHeightInsertIfNotExists(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height1 := uint64(1337) - - err := db.Update(InsertLastCompleteBlockHeightIfNotExists(height1)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height1) - - height2 := uint64(9999) - err = db.Update(InsertLastCompleteBlockHeightIfNotExists(height2)) - require.NoError(t, err) - - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height1) - }) -} diff --git a/storage/badger/operation/jobs.go b/storage/badger/operation/jobs.go deleted file mode 100644 index 1bad48f752f..00000000000 --- a/storage/badger/operation/jobs.go +++ /dev/null @@ -1,29 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func RetrieveJobLatestIndex(queue string, index *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeJobQueuePointer, queue), index) -} - -func InitJobLatestIndex(queue string, index uint64) func(*badger.Txn) error { - return insert(makePrefix(codeJobQueuePointer, queue), index) -} - -func SetJobLatestIndex(queue string, index uint64) func(*badger.Txn) error { - return update(makePrefix(codeJobQueuePointer, queue), index) -} - -// RetrieveJobAtIndex returns the entity at the given index -func RetrieveJobAtIndex(queue string, index uint64, entity *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeJobQueue, queue, index), entity) -} - -// InsertJobAtIndex insert an entity ID at the given index -func InsertJobAtIndex(queue string, index uint64, entity flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeJobQueue, queue, index), entity) -} diff --git a/storage/badger/operation/payload.go b/storage/badger/operation/payload.go deleted file mode 100644 index 91fc0488122..00000000000 --- a/storage/badger/operation/payload.go +++ /dev/null @@ -1,85 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertSeal(sealID flow.Identifier, seal *flow.Seal) func(*badger.Txn) error { - return insert(makePrefix(codeSeal, sealID), seal) -} - -func RetrieveSeal(sealID flow.Identifier, seal *flow.Seal) func(*badger.Txn) error { - return retrieve(makePrefix(codeSeal, sealID), seal) -} - -func IndexPayloadSeals(blockID flow.Identifier, sealIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadSeals, blockID), sealIDs) -} - -func LookupPayloadSeals(blockID flow.Identifier, sealIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadSeals, blockID), sealIDs) -} - -func IndexPayloadReceipts(blockID flow.Identifier, receiptIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadReceipts, blockID), receiptIDs) -} - -func IndexPayloadResults(blockID flow.Identifier, resultIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadResults, blockID), resultIDs) -} - -func IndexPayloadProtocolStateID(blockID flow.Identifier, stateID flow.Identifier) func(*badger.Txn) error { - return 
insert(makePrefix(codePayloadProtocolStateID, blockID), stateID) -} - -func LookupPayloadProtocolStateID(blockID flow.Identifier, stateID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadProtocolStateID, blockID), stateID) -} - -func LookupPayloadReceipts(blockID flow.Identifier, receiptIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadReceipts, blockID), receiptIDs) -} - -func LookupPayloadResults(blockID flow.Identifier, resultIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadResults, blockID), resultIDs) -} - -// IndexLatestSealAtBlock persists the highest seal that was included in the fork up to (and including) blockID. -// In most cases, it is the highest seal included in this block's payload. However, if there are no -// seals in this block, sealID should reference the highest seal in blockID's ancestor. -func IndexLatestSealAtBlock(blockID flow.Identifier, sealID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToLatestSealID, blockID), sealID) -} - -// LookupLatestSealAtBlock finds the highest seal that was included in the fork up to (and including) blockID. -// In most cases, it is the highest seal included in this block's payload. However, if there are no -// seals in this block, sealID should reference the highest seal in blockID's ancestor. -func LookupLatestSealAtBlock(blockID flow.Identifier, sealID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToLatestSealID, blockID), &sealID) -} - -// IndexFinalizedSealByBlockID indexes the _finalized_ seal by the sealed block ID. -// Example: A <- B <- C(SealA) -// when block C is finalized, we create the index `A.ID->SealA.ID` -func IndexFinalizedSealByBlockID(sealedBlockID flow.Identifier, sealID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), sealID) -} - -// LookupBySealedBlockID finds the seal for the given sealed block ID. 
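Following the `A <- B <- C(SealA)` example in the comments above, the finalized-seal index is keyed by the *sealed* block, not by the block that carries the seal. A small sketch of writing and reading that index (helper name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// indexFinalizedSeal records that `seal` seals block `sealedBlockID` once the
// block containing the seal is finalized, then resolves the seal ID again.
func indexFinalizedSeal(db *badger.DB, sealedBlockID flow.Identifier, seal *flow.Seal) (flow.Identifier, error) {
	if err := db.Update(operation.IndexFinalizedSealByBlockID(sealedBlockID, seal.ID())); err != nil {
		return flow.ZeroID, fmt.Errorf("could not index finalized seal: %w", err)
	}
	var sealID flow.Identifier
	if err := db.View(operation.LookupBySealedBlockID(sealedBlockID, &sealID)); err != nil {
		return flow.ZeroID, fmt.Errorf("could not look up seal for sealed block: %w", err)
	}
	return sealID, nil
}
```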
-func LookupBySealedBlockID(sealedBlockID flow.Identifier, sealID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), &sealID) -} - -func InsertExecutionForkEvidence(conflictingSeals []*flow.IncorporatedResultSeal) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionFork), conflictingSeals) -} - -func RemoveExecutionForkEvidence() func(*badger.Txn) error { - return remove(makePrefix(codeExecutionFork)) -} - -func RetrieveExecutionForkEvidence(conflictingSeals *[]*flow.IncorporatedResultSeal) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionFork), conflictingSeals) -} diff --git a/storage/badger/operation/payload_test.go b/storage/badger/operation/payload_test.go deleted file mode 100644 index fcb86cd8e8b..00000000000 --- a/storage/badger/operation/payload_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestSealInsertCheckRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.Seal.Fixture() - - err := db.Update(InsertSeal(expected.ID(), expected)) - require.NoError(t, err) - - var actual flow.Seal - err = db.View(RetrieveSeal(expected.ID(), &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, &actual) - }) -} - -func TestSealIndexAndLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - seal1 := unittest.Seal.Fixture() - seal2 := unittest.Seal.Fixture() - - seals := []*flow.Seal{seal1, seal2} - - blockID := flow.MakeID([]byte{0x42}) - - expected := []flow.Identifier(flow.GetIDs(seals)) - - err := db.Update(func(tx *badger.Txn) error { - for _, seal := range seals { - if err := InsertSeal(seal.ID(), seal)(tx); err != nil { - return err - } - } - if err := IndexPayloadSeals(blockID, expected)(tx); err != nil { - return err - } - return nil - }) - require.NoError(t, err) - - var actual []flow.Identifier - err = db.View(LookupPayloadSeals(blockID, &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/prefix.go b/storage/badger/operation/prefix.go index 70dc90f1b15..279cee81a45 100644 --- a/storage/badger/operation/prefix.go +++ b/storage/badger/operation/prefix.go @@ -10,106 +10,11 @@ const ( codeMax = 1 // keeps track of the maximum key size codeDBType = 2 // specifies a database type - // codes for views with special meaning - codeSafetyData = 10 // safety data for hotstuff state - codeLivenessData = 11 // liveness data for hotstuff state - - // codes for fields associated with the root state - codeSporkID = 13 - _ = 14 // DEPRECATED: 14 was used for ProtocolVersion before the versioned Protocol State - _ = 15 // DEPRECATED: 15 was used to save the finalization safety threshold - codeSporkRootBlockHeight = 16 - - // code for heights with special meaning - codeFinalizedHeight = 20 // latest finalized block height - codeSealedHeight = 21 // latest sealed block height - codeClusterHeight = 22 // latest finalized height on cluster - codeExecutedBlock = 23 // latest executed block with max height - codeFinalizedRootHeight = 24 // the height of the highest finalized block contained in the root snapshot - codeLastCompleteBlockHeight = 25 // the height of the last block for which all collections were received - 
codeEpochFirstHeight = 26 // the height of the first block in a given epoch - codeSealedRootHeight = 27 // the height of the highest sealed block contained in the root snapshot - - // codes for single entity storage - codeHeader = 30 - _ = 31 // DEPRECATED: 31 was used for identities before epochs - codeGuarantee = 32 - codeSeal = 33 - codeTransaction = 34 - codeCollection = 35 - codeExecutionResult = 36 - codeResultApproval = 37 - codeChunk = 38 - codeExecutionReceiptMeta = 39 // NOTE: prior to Mainnet25, this erroneously had the same value as codeExecutionResult (36) - - // codes for indexing single identifier by identifier/integer - codeHeightToBlock = 40 // index mapping height to block ID - codeBlockIDToLatestSealID = 41 // index mapping a block its last payload seal - codeClusterBlockToRefBlock = 42 // index cluster block ID to reference block ID - codeRefHeightToClusterBlock = 43 // index reference block height to cluster block IDs - codeBlockIDToFinalizedSeal = 44 // index _finalized_ seal by sealed block ID - codeBlockIDToQuorumCertificate = 45 // index of quorum certificates by block ID - codeEpochProtocolStateByBlockID = 46 // index of epoch protocol state entry by block ID - codeProtocolKVStoreByBlockID = 47 // index of protocol KV store entry by block ID - - // codes for indexing multiple identifiers by identifier - codeBlockChildren = 50 // index mapping block ID to children blocks - _ = 51 // DEPRECATED: 51 was used for identity indexes before epochs - codePayloadGuarantees = 52 // index mapping block ID to payload guarantees - codePayloadSeals = 53 // index mapping block ID to payload seals - codeCollectionBlock = 54 // index mapping collection ID to block ID - codeOwnBlockReceipt = 55 // index mapping block ID to execution receipt ID for execution nodes - _ = 56 // DEPRECATED: 56 was used for block->epoch status prior to Dynamic Protocol State in Mainnet25 - codePayloadReceipts = 57 // index mapping block ID to payload receipts - codePayloadResults = 58 // index mapping block ID to payload results - codeAllBlockReceipts = 59 // index mapping of blockID to multiple receipts - codePayloadProtocolStateID = 60 // index mapping block ID to payload protocol state ID - // codes related to protocol level information - codeEpochSetup = 61 // EpochSetup service event, keyed by ID - codeEpochCommit = 62 // EpochCommit service event, keyed by ID - codeBeaconPrivateKey = 63 // BeaconPrivateKey, keyed by epoch counter - _ = 64 // [DEPRECATED] flag that the DKG for an epoch has been started, used in protocol version v1 - codeDKGEndState = 65 // [DEPRECATED] flag for DKG end state, used in protocol version v1 - codeDKGState = 66 // current state of Recoverable Random Beacon State Machine for given epoch - codeVersionBeacon = 67 // flag for storing version beacons - codeEpochProtocolState = 68 - codeProtocolKVStore = 69 - - // code for ComputationResult upload status storage - // NOTE: for now only GCP uploader is supported. When other uploader (AWS e.g.) needs to - // be supported, we will need to define new code. 
- _ = 66 // used by ComputationResults in storage/operation - - // job queue consumers and producers - codeJobConsumerProcessed = 70 - codeJobQueue = 71 - codeJobQueuePointer = 72 - - // legacy codes (should be cleaned up) - codeChunkDataPack = 100 - codeCommit = 101 - codeEvent = 102 - codeExecutionStateInteractions = 103 - codeTransactionResult = 104 - codeFinalizedCluster = 105 - codeServiceEvent = 106 - codeTransactionResultIndex = 107 - codeLightTransactionResult = 108 - codeLightTransactionResultIndex = 109 - codeTransactionResultErrorMessage = 110 - codeTransactionResultErrorMessageIndex = 111 - codeIndexCollection = 200 - codeIndexExecutionResultByBlock = 202 - codeIndexCollectionByTransaction = 203 - codeIndexResultApprovalByChunk = 204 - - // TEMPORARY codes - disallowedNodeIDs = 205 // manual override for adding node IDs to list of ejected nodes, applies to networking layer only - - // internal failure information that should be preserved across restarts - codeExecutionFork = 254 - codeEpochEmergencyFallbackTriggered = 255 + codeBeaconPrivateKey = 63 // BeaconPrivateKey, keyed by epoch counter + _ = 64 // DEPRECATED: flag that the DKG for an epoch has been started + codeDKGEndState = 65 // DEPRECATED: flag that the DKG for an epoch has ended (stores end state) + codeDKGState = 66 // current state of Recoverable Random Beacon State Machine for given epoch ) func makePrefix(code byte, keys ...any) []byte { diff --git a/storage/badger/operation/qcs.go b/storage/badger/operation/qcs.go deleted file mode 100644 index 651a585b2b2..00000000000 --- a/storage/badger/operation/qcs.go +++ /dev/null @@ -1,19 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertQuorumCertificate inserts a quorum certificate by block ID. -// Returns storage.ErrAlreadyExists if a QC has already been inserted for the block. -func InsertQuorumCertificate(qc *flow.QuorumCertificate) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToQuorumCertificate, qc.BlockID), qc) -} - -// RetrieveQuorumCertificate retrieves a quorum certificate by blockID. -// Returns storage.ErrNotFound if no QC is stored for the block. 
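`InsertQuorumCertificate` documents `storage.ErrAlreadyExists` when a QC is already stored for the block. One possible caller policy, sketched below, treats that as benign and keeps the first QC; whether this is appropriate depends on the caller's context, and the helper name is illustrative:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// storeQC persists a quorum certificate for its block. A different but equally
// valid QC may already be indexed for the same block; in this sketch we treat
// that case as benign and keep whichever QC was stored first.
func storeQC(db *badger.DB, qc *flow.QuorumCertificate) error {
	err := db.Update(operation.InsertQuorumCertificate(qc))
	if errors.Is(err, storage.ErrAlreadyExists) {
		return nil // benign in this context
	}
	if err != nil {
		return fmt.Errorf("could not store quorum certificate for block %v: %w", qc.BlockID, err)
	}
	return nil
}
```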
-func RetrieveQuorumCertificate(blockID flow.Identifier, qc *flow.QuorumCertificate) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToQuorumCertificate, blockID), qc) -} diff --git a/storage/badger/operation/qcs_test.go b/storage/badger/operation/qcs_test.go deleted file mode 100644 index 38d9b07889b..00000000000 --- a/storage/badger/operation/qcs_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertQuorumCertificate(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.QuorumCertificateFixture() - - err := db.Update(InsertQuorumCertificate(expected)) - require.NoError(t, err) - - var actual flow.QuorumCertificate - err = db.View(RetrieveQuorumCertificate(expected.BlockID, &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, &actual) - }) -} diff --git a/storage/badger/operation/receipts.go b/storage/badger/operation/receipts.go deleted file mode 100644 index 7224819cb6c..00000000000 --- a/storage/badger/operation/receipts.go +++ /dev/null @@ -1,87 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertExecutionReceiptMeta inserts an execution receipt meta by ID. -func InsertExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// BatchInsertExecutionReceiptMeta inserts an execution receipt meta by ID. -// TODO: rename to BatchUpdate -func BatchInsertExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// RetrieveExecutionReceiptMeta retrieves a execution receipt meta by ID. -func RetrieveExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// IndexOwnExecutionReceipt inserts an execution receipt ID keyed by block ID -func IndexOwnExecutionReceipt(blockID flow.Identifier, receiptID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// BatchIndexOwnExecutionReceipt inserts an execution receipt ID keyed by block ID into a batch -// TODO: rename to BatchUpdate -func BatchIndexOwnExecutionReceipt(blockID flow.Identifier, receiptID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// LookupOwnExecutionReceipt finds execution receipt ID by block -func LookupOwnExecutionReceipt(blockID flow.Identifier, receiptID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// RemoveOwnExecutionReceipt removes own execution receipt index by blockID -func RemoveOwnExecutionReceipt(blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeOwnBlockReceipt, blockID)) -} - -// BatchRemoveOwnExecutionReceipt removes blockID-to-my-receiptID index entries keyed by a blockID in a provided batch. 
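The own-receipt index above maps a block ID to the receipt an execution node produced itself (at most one per block), while `IndexExecutionReceipts` below indexes all known receipts for a block. A sketch of the own-receipt write path, combining both writes in one transaction (helper name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// storeMyReceipt persists the node's own execution receipt meta and indexes it
// by the executed block, so it can later be found via LookupOwnExecutionReceipt.
func storeMyReceipt(db *badger.DB, receipt *flow.ExecutionReceipt) error {
	return db.Update(func(tx *badger.Txn) error {
		if err := operation.InsertExecutionReceiptMeta(receipt.ID(), receipt.Meta())(tx); err != nil {
			return fmt.Errorf("could not store receipt meta: %w", err)
		}
		blockID := receipt.ExecutionResult.BlockID
		if err := operation.IndexOwnExecutionReceipt(blockID, receipt.ID())(tx); err != nil {
			return fmt.Errorf("could not index own receipt: %w", err)
		}
		return nil
	})
}
```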
-// No errors are expected during normal operation, but it may return generic error -// if badger fails to process request -func BatchRemoveOwnExecutionReceipt(blockID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchRemove(makePrefix(codeOwnBlockReceipt, blockID)) -} - -// IndexExecutionReceipts inserts an execution receipt ID keyed by block ID and receipt ID. -// one block could have multiple receipts, even if they are from the same executor -func IndexExecutionReceipts(blockID, receiptID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) -} - -// BatchIndexExecutionReceipts inserts an execution receipt ID keyed by block ID and receipt ID into a batch -func BatchIndexExecutionReceipts(blockID, receiptID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) -} - -// LookupExecutionReceipts finds all execution receipts by block ID -func LookupExecutionReceipts(blockID flow.Identifier, receiptIDs *[]flow.Identifier) func(*badger.Txn) error { - iterationFunc := receiptIterationFunc(receiptIDs) - return traverse(makePrefix(codeAllBlockReceipts, blockID), iterationFunc) -} - -// receiptIterationFunc returns an in iteration function which returns all receipt IDs found during traversal -func receiptIterationFunc(receiptIDs *[]flow.Identifier) func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - - var receiptID flow.Identifier - create := func() interface{} { - return &receiptID - } - handle := func() error { - *receiptIDs = append(*receiptIDs, receiptID) - return nil - } - return func() (checkFunc, createFunc, handleFunc) { - return check, create, handle - } -} diff --git a/storage/badger/operation/receipts_test.go b/storage/badger/operation/receipts_test.go deleted file mode 100644 index ac607dc33a7..00000000000 --- a/storage/badger/operation/receipts_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestReceipts_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - receipt := unittest.ExecutionReceiptFixture() - expected := receipt.Meta() - - err := db.Update(InsertExecutionReceiptMeta(receipt.ID(), expected)) - require.NoError(t, err) - - var actual flow.ExecutionReceiptMeta - err = db.View(RetrieveExecutionReceiptMeta(receipt.ID(), &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, &actual) - }) -} - -func TestReceipts_Index(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - receipt := unittest.ExecutionReceiptFixture() - expected := receipt.ID() - blockID := receipt.ExecutionResult.BlockID - - err := db.Update(IndexOwnExecutionReceipt(blockID, expected)) - require.NoError(t, err) - - var actual flow.Identifier - err = db.View(LookupOwnExecutionReceipt(blockID, &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, actual) - }) -} - -func TestReceipts_MultiIndex(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := []flow.Identifier{unittest.IdentifierFixture(), unittest.IdentifierFixture()} - blockID := unittest.IdentifierFixture() - - for _, id := range expected { - err := db.Update(IndexExecutionReceipts(blockID, id)) - 
require.NoError(t, err) - } - var actual []flow.Identifier - err := db.View(LookupExecutionReceipts(blockID, &actual)) - require.NoError(t, err) - - assert.ElementsMatch(t, expected, actual) - }) -} diff --git a/storage/badger/operation/results.go b/storage/badger/operation/results.go deleted file mode 100644 index 8e762cc5b41..00000000000 --- a/storage/badger/operation/results.go +++ /dev/null @@ -1,54 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertExecutionResult inserts an execution result by ID. -func InsertExecutionResult(result *flow.ExecutionResult) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionResult, result.ID()), result) -} - -// BatchInsertExecutionResult inserts an execution result by ID. -func BatchInsertExecutionResult(result *flow.ExecutionResult) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeExecutionResult, result.ID()), result) -} - -// RetrieveExecutionResult retrieves a transaction by fingerprint. -func RetrieveExecutionResult(resultID flow.Identifier, result *flow.ExecutionResult) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionResult, resultID), result) -} - -// IndexExecutionResult inserts an execution result ID keyed by block ID -func IndexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// ReindexExecutionResult updates mapping of an execution result ID keyed by block ID -func ReindexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(*badger.Txn) error { - return update(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// BatchIndexExecutionResult inserts an execution result ID keyed by block ID into a batch -func BatchIndexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// LookupExecutionResult finds execution result ID by block -func LookupExecutionResult(blockID flow.Identifier, resultID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// RemoveExecutionResultIndex removes execution result indexed by the given blockID -func RemoveExecutionResultIndex(blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeIndexExecutionResultByBlock, blockID)) -} - -// BatchRemoveExecutionResultIndex removes blockID-to-resultID index entries keyed by a blockID in a provided batch. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
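Unlike the multi-receipt index above, `IndexExecutionResult` maintains a single block-to-result mapping (with `ReindexExecutionResult` to overwrite it). A minimal write-path sketch that stores a result and indexes it for its executed block (helper name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// storeAndIndexResult persists an execution result and records it as the result
// for its executed block in a single transaction.
func storeAndIndexResult(db *badger.DB, result *flow.ExecutionResult) error {
	return db.Update(func(tx *badger.Txn) error {
		if err := operation.InsertExecutionResult(result)(tx); err != nil {
			return fmt.Errorf("could not insert execution result: %w", err)
		}
		if err := operation.IndexExecutionResult(result.BlockID, result.ID())(tx); err != nil {
			return fmt.Errorf("could not index execution result: %w", err)
		}
		return nil
	})
}
```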
-func BatchRemoveExecutionResultIndex(blockID flow.Identifier) func(*badger.WriteBatch) error { - return batchRemove(makePrefix(codeIndexExecutionResultByBlock, blockID)) -} diff --git a/storage/badger/operation/results_test.go b/storage/badger/operation/results_test.go deleted file mode 100644 index 0221e51dd8f..00000000000 --- a/storage/badger/operation/results_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestResults_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.ExecutionResultFixture() - - err := db.Update(InsertExecutionResult(expected)) - require.NoError(t, err) - - var actual flow.ExecutionResult - err = db.View(RetrieveExecutionResult(expected.ID(), &actual)) - require.NoError(t, err) - - assert.Equal(t, expected, &actual) - }) -} diff --git a/storage/badger/operation/spork.go b/storage/badger/operation/spork.go deleted file mode 100644 index 508fab5d9b8..00000000000 --- a/storage/badger/operation/spork.go +++ /dev/null @@ -1,31 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertSporkID inserts the spork ID for the present spork. A single database -// and protocol state instance spans at most one spork, so this is inserted -// exactly once, when bootstrapping the state. -func InsertSporkID(sporkID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeSporkID), sporkID) -} - -// RetrieveSporkID retrieves the spork ID for the present spork. -func RetrieveSporkID(sporkID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeSporkID), sporkID) -} - -// InsertSporkRootBlockHeight inserts the spork root block height for the present spork. -// A single database and protocol state instance spans at most one spork, so this is inserted -// exactly once, when bootstrapping the state. -func InsertSporkRootBlockHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeSporkRootBlockHeight), height) -} - -// RetrieveSporkRootBlockHeight retrieves the spork root block height for the present spork. 
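Per the comments above, the spork ID and spork root block height are written exactly once, during bootstrapping, and are only read afterwards. A sketch of that bootstrap write (helper name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// bootstrapSporkInfo records the spork ID and spork root block height. Both are
// bootstrap-only writes; a database instance spans at most one spork.
func bootstrapSporkInfo(db *badger.DB, sporkID flow.Identifier, rootHeight uint64) error {
	return db.Update(func(tx *badger.Txn) error {
		if err := operation.InsertSporkID(sporkID)(tx); err != nil {
			return fmt.Errorf("could not insert spork ID: %w", err)
		}
		if err := operation.InsertSporkRootBlockHeight(rootHeight)(tx); err != nil {
			return fmt.Errorf("could not insert spork root block height: %w", err)
		}
		return nil
	})
}
```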
-func RetrieveSporkRootBlockHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeSporkRootBlockHeight), height) -} diff --git a/storage/badger/operation/spork_test.go b/storage/badger/operation/spork_test.go deleted file mode 100644 index 148ee059861..00000000000 --- a/storage/badger/operation/spork_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestSporkID_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - sporkID := unittest.IdentifierFixture() - - err := db.Update(InsertSporkID(sporkID)) - require.NoError(t, err) - - var actual flow.Identifier - err = db.View(RetrieveSporkID(&actual)) - require.NoError(t, err) - - assert.Equal(t, sporkID, actual) - }) -} diff --git a/storage/badger/operation/transactions.go b/storage/badger/operation/transactions.go deleted file mode 100644 index 1ad372bc6a7..00000000000 --- a/storage/badger/operation/transactions.go +++ /dev/null @@ -1,17 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertTransaction inserts a transaction keyed by transaction fingerprint. -func InsertTransaction(txID flow.Identifier, tx *flow.TransactionBody) func(*badger.Txn) error { - return insert(makePrefix(codeTransaction, txID), tx) -} - -// RetrieveTransaction retrieves a transaction by fingerprint. -func RetrieveTransaction(txID flow.Identifier, tx *flow.TransactionBody) func(*badger.Txn) error { - return retrieve(makePrefix(codeTransaction, txID), tx) -} diff --git a/storage/badger/operation/transactions_test.go b/storage/badger/operation/transactions_test.go deleted file mode 100644 index 4bcff7a5d66..00000000000 --- a/storage/badger/operation/transactions_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestTransactions(t *testing.T) { - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.TransactionFixture() - err := db.Update(InsertTransaction(expected.ID(), &expected.TransactionBody)) - require.NoError(t, err) - - var actual flow.Transaction - err = db.View(RetrieveTransaction(expected.ID(), &actual.TransactionBody)) - require.NoError(t, err) - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/version_beacon.go b/storage/badger/operation/version_beacon.go deleted file mode 100644 index b1f538eddf0..00000000000 --- a/storage/badger/operation/version_beacon.go +++ /dev/null @@ -1,32 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// IndexVersionBeaconByHeight stores a sealed version beacon indexed by -// flow.SealedVersionBeacon.SealHeight. -// -// No errors are expected during normal operation. 
-// deprecated, this is still needed by the state/protocol/badger/mutator.go -func IndexVersionBeaconByHeight( - beacon *flow.SealedVersionBeacon, -) func(*badger.Txn) error { - return upsert(makePrefix(codeVersionBeacon, beacon.SealHeight), beacon) -} - -// LookupLastVersionBeaconByHeight finds the highest flow.VersionBeacon but no higher -// than maxHeight. Returns storage.ErrNotFound if no version beacon exists at or below -// the given height. -func LookupLastVersionBeaconByHeight( - maxHeight uint64, - versionBeacon *flow.SealedVersionBeacon, -) func(*badger.Txn) error { - return findHighestAtOrBelow( - makePrefix(codeVersionBeacon), - maxHeight, - versionBeacon, - ) -} diff --git a/storage/badger/operation/version_beacon_test.go b/storage/badger/operation/version_beacon_test.go deleted file mode 100644 index d46ed334f93..00000000000 --- a/storage/badger/operation/version_beacon_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestResults_IndexByServiceEvents(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height1 := uint64(21) - height2 := uint64(37) - height3 := uint64(55) - vb1 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "1.0.0", - BlockHeight: height1 + 5, - }, - ), - ), - SealHeight: height1, - } - vb2 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "1.1.0", - BlockHeight: height2 + 5, - }, - ), - ), - SealHeight: height2, - } - vb3 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "2.0.0", - BlockHeight: height3 + 5, - }, - ), - ), - SealHeight: height3, - } - - // indexing 3 version beacons at different heights - err := db.Update(IndexVersionBeaconByHeight(&vb1)) - require.NoError(t, err) - - err = db.Update(IndexVersionBeaconByHeight(&vb2)) - require.NoError(t, err) - - err = db.Update(IndexVersionBeaconByHeight(&vb3)) - require.NoError(t, err) - - // index version beacon 2 again to make sure we tolerate duplicates - // it is possible for two or more events of the same type to be from the same height - err = db.Update(IndexVersionBeaconByHeight(&vb2)) - require.NoError(t, err) - - t.Run("retrieve exact height match", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - err := db.View(LookupLastVersionBeaconByHeight(height1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb1, actualVB) - - err = db.View(LookupLastVersionBeaconByHeight(height2, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb2, actualVB) - - err = db.View(LookupLastVersionBeaconByHeight(height3, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb3, actualVB) - }) - - t.Run("finds highest but not higher than given", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height3-1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb2, actualVB) - }) - - t.Run("finds highest", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height3+1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb3, actualVB) - }) - - 
t.Run("height below lowest entry returns nothing", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height1-1, &actualVB)) - require.ErrorIs(t, err, storage.ErrNotFound) - }) - }) -} diff --git a/storage/badger/operation/views.go b/storage/badger/operation/views.go deleted file mode 100644 index 018b184b248..00000000000 --- a/storage/badger/operation/views.go +++ /dev/null @@ -1,20 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/model/flow" -) - -// InsertSafetyData inserts safety data into the database. -// Deprecated: this function will be replaced by `operation.UpsertSafetyData` from the `storage/operation` package when moving to Pebble -func InsertSafetyData(chainID flow.ChainID, safetyData *hotstuff.SafetyData) func(*badger.Txn) error { - return insert(makePrefix(codeSafetyData, chainID), safetyData) -} - -// InsertLivenessData inserts liveness data into the database. -// Deprecated: this function will be replaced by `operation.UpsertLivenessData` from the `storage/operation` package when moving to Pebble -func InsertLivenessData(chainID flow.ChainID, livenessData *hotstuff.LivenessData) func(*badger.Txn) error { - return insert(makePrefix(codeLivenessData, chainID), livenessData) -} diff --git a/storage/badger/payloads.go b/storage/badger/payloads.go deleted file mode 100644 index c4d57277c72..00000000000 --- a/storage/badger/payloads.go +++ /dev/null @@ -1,166 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Payloads struct { - db *badger.DB - index *Index - guarantees *Guarantees - seals *Seals - receipts *ExecutionReceipts - results *ExecutionResults -} - -func NewPayloads(db *badger.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts, - results *ExecutionResults) *Payloads { - - p := &Payloads{ - db: db, - index: index, - guarantees: guarantees, - seals: seals, - receipts: receipts, - results: results, - } - - return p -} - -func (p *Payloads) storeTx(blockID flow.Identifier, payload *flow.Payload) func(*transaction.Tx) error { - // For correct payloads, the execution result is part of the payload or it's already stored - // in storage. If execution result is not present in either of those places, we error. - // ATTENTION: this is unnecessarily complex if we have execution receipt which points an execution result - // which is not included in current payload but was incorporated in one of previous blocks. 
- - return func(tx *transaction.Tx) error { - - resultsByID := payload.Results.Lookup() - fullReceipts := make([]*flow.ExecutionReceipt, 0, len(payload.Receipts)) - var err error - for _, meta := range payload.Receipts { - result, ok := resultsByID[meta.ResultID] - if !ok { - result, err = p.results.ByIDTx(meta.ResultID)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - err = fmt.Errorf("invalid payload referencing unknown execution result %v, err: %w", meta.ResultID, err) - } - return err - } - } - fullReceipts = append(fullReceipts, flow.ExecutionReceiptFromMeta(*meta, *result)) - } - - // make sure all payload guarantees are stored - for _, guarantee := range payload.Guarantees { - err := p.guarantees.storeTx(guarantee)(tx) - if err != nil { - return fmt.Errorf("could not store guarantee: %w", err) - } - } - - // make sure all payload seals are stored - for _, seal := range payload.Seals { - err := p.seals.storeTx(seal)(tx) - if err != nil { - return fmt.Errorf("could not store seal: %w", err) - } - } - - // store all payload receipts - for _, receipt := range fullReceipts { - err := p.receipts.storeTx(receipt)(tx) - if err != nil { - return fmt.Errorf("could not store receipt: %w", err) - } - } - - // store the index - err = p.index.storeTx(blockID, payload.Index())(tx) - if err != nil { - return fmt.Errorf("could not store index: %w", err) - } - - return nil - } -} - -func (p *Payloads) retrieveTx(blockID flow.Identifier) func(tx *badger.Txn) (*flow.Payload, error) { - return func(tx *badger.Txn) (*flow.Payload, error) { - - // retrieve the index - idx, err := p.index.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve index: %w", err) - } - - // retrieve guarantees - guarantees := make([]*flow.CollectionGuarantee, 0, len(idx.CollectionIDs)) - for _, collID := range idx.CollectionIDs { - guarantee, err := p.guarantees.retrieveTx(collID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve guarantee (%x): %w", collID, err) - } - guarantees = append(guarantees, guarantee) - } - - // retrieve seals - seals := make([]*flow.Seal, 0, len(idx.SealIDs)) - for _, sealID := range idx.SealIDs { - seal, err := p.seals.retrieveTx(sealID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve seal (%x): %w", sealID, err) - } - seals = append(seals, seal) - } - - // retrieve receipts - receipts := make([]*flow.ExecutionReceiptMeta, 0, len(idx.ReceiptIDs)) - for _, recID := range idx.ReceiptIDs { - receipt, err := p.receipts.byID(recID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve receipt %x: %w", recID, err) - } - receipts = append(receipts, receipt.Meta()) - } - - // retrieve results - results := make([]*flow.ExecutionResult, 0, len(idx.ResultIDs)) - for _, resID := range idx.ResultIDs { - result, err := p.results.byID(resID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve result %x: %w", resID, err) - } - results = append(results, result) - } - payload := &flow.Payload{ - Seals: seals, - Guarantees: guarantees, - Receipts: receipts, - Results: results, - ProtocolStateID: idx.ProtocolStateID, - } - - return payload, nil - } -} - -func (p *Payloads) Store(blockID flow.Identifier, payload *flow.Payload) error { - return operation.RetryOnConflictTx(p.db, transaction.Update, p.storeTx(blockID, payload)) -} - -func (p *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { - tx := p.db.NewTransaction(false) - defer tx.Discard() - return p.retrieveTx(blockID)(tx) -} diff 
--git a/storage/badger/payloads_test.go b/storage/badger/payloads_test.go deleted file mode 100644 index e796c2d946e..00000000000 --- a/storage/badger/payloads_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestPayloadStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - - index := badgerstorage.NewIndex(metrics, db) - seals := badgerstorage.NewSeals(metrics, db) - guarantees := badgerstorage.NewGuarantees(metrics, db, badgerstorage.DefaultCacheSize) - results := badgerstorage.NewExecutionResults(metrics, db) - receipts := badgerstorage.NewExecutionReceipts(metrics, db, results, badgerstorage.DefaultCacheSize) - store := badgerstorage.NewPayloads(db, index, guarantees, seals, receipts, results) - - blockID := unittest.IdentifierFixture() - expected := unittest.PayloadFixture(unittest.WithAllTheFixins) - - // store payload - err := store.Store(blockID, &expected) - require.NoError(t, err) - - // fetch payload - payload, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, &expected, payload) - }) -} - -func TestPayloadRetreiveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - - index := badgerstorage.NewIndex(metrics, db) - seals := badgerstorage.NewSeals(metrics, db) - guarantees := badgerstorage.NewGuarantees(metrics, db, badgerstorage.DefaultCacheSize) - results := badgerstorage.NewExecutionResults(metrics, db) - receipts := badgerstorage.NewExecutionReceipts(metrics, db, results, badgerstorage.DefaultCacheSize) - store := badgerstorage.NewPayloads(db, index, guarantees, seals, receipts, results) - - blockID := unittest.IdentifierFixture() - - _, err := store.ByBlockID(blockID) - require.ErrorIs(t, err, storage.ErrNotFound) - }) -} diff --git a/storage/badger/procedure/children.go b/storage/badger/procedure/children.go deleted file mode 100644 index e95412f6403..00000000000 --- a/storage/badger/procedure/children.go +++ /dev/null @@ -1,82 +0,0 @@ -package procedure - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// IndexNewBlock will add parent-child index for the new block. -// - Each block has a parent, we use this parent-child relationship to build a reverse index -// - for looking up children blocks for a given block. This is useful for forks recovery -// where we want to find all the pending children blocks for the lastest finalized block. -// -// When adding parent-child index for a new block, we will add two indexes: -// 1. since it's a new block, the new block should have no child, so adding an empty -// index for the new block. Note: It's impossible there is a block whose parent is the -// new block. -// 2. since the parent block has this new block as a child, adding an index for that. -// there are two special cases for (2): -// - if the parent block is zero, then we don't need to add this index. 
-// - if the parent block doesn't exist, then we will insert the child index instead of updating -func IndexNewBlock(blockID flow.Identifier, parentID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - // Step 1: index the child for the new block. - // the new block has no child, so adding an empty child index for it - err := operation.InsertBlockChildren(blockID, nil)(tx) - if err != nil { - return fmt.Errorf("could not insert empty block children: %w", err) - } - - // Step 2: adding the second index for the parent block - // if the parent block is zero, for instance root block has no parent, - // then no need to add index for it - if parentID == flow.ZeroID { - return nil - } - - // if the parent block is not zero, depending on whether the parent block has - // children or not, we will either update the index or insert the index: - // when parent block doesn't exist, we will insert the block children. - // when parent block exists already, we will update the block children, - var childrenIDs flow.IdentifierList - err = operation.RetrieveBlockChildren(parentID, &childrenIDs)(tx) - - var saveIndex func(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error - if errors.Is(err, storage.ErrNotFound) { - saveIndex = operation.InsertBlockChildren - } else if err != nil { - return fmt.Errorf("could not look up block children: %w", err) - } else { // err == nil - saveIndex = operation.UpdateBlockChildren - } - - // check we don't add a duplicate - for _, dupID := range childrenIDs { - if blockID == dupID { - return storage.ErrAlreadyExists - } - } - - // adding the new block to be another child of the parent - childrenIDs = append(childrenIDs, blockID) - - // saving the index - err = saveIndex(parentID, childrenIDs)(tx) - if err != nil { - return fmt.Errorf("could not update children index: %w", err) - } - - return nil - } -} - -// LookupBlockChildren looks up the IDs of all child blocks of the given parent block. 
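As the `IndexNewBlock` comments note, the parent-to-children index exists so that pending descendants of the latest finalized block can be recovered after a restart. A hedged sketch of such a traversal using `LookupBlockChildren` (breadth-first; the helper name and the ErrNotFound handling are illustrative):

```go
package example

import (
	"errors"
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/badger/procedure"
)

// allDescendants walks the parent->children index breadth-first and returns all
// known descendants of blockID, e.g. the pending blocks above the latest
// finalized block after a restart.
func allDescendants(db *badger.DB, blockID flow.Identifier) ([]flow.Identifier, error) {
	var pending []flow.Identifier
	queue := []flow.Identifier{blockID}
	for len(queue) > 0 {
		parentID := queue[0]
		queue = queue[1:]

		var childrenIDs flow.IdentifierList
		err := db.View(procedure.LookupBlockChildren(parentID, &childrenIDs))
		if errors.Is(err, storage.ErrNotFound) {
			continue // no children indexed for this block
		}
		if err != nil {
			return nil, fmt.Errorf("could not look up children of %v: %w", parentID, err)
		}
		pending = append(pending, childrenIDs...)
		queue = append(queue, childrenIDs...)
	}
	return pending, nil
}
```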
-func LookupBlockChildren(blockID flow.Identifier, childrenIDs *flow.IdentifierList) func(tx *badger.Txn) error { - return operation.RetrieveBlockChildren(blockID, childrenIDs) -} diff --git a/storage/badger/procedure/children_test.go b/storage/badger/procedure/children_test.go deleted file mode 100644 index b3df751d00e..00000000000 --- a/storage/badger/procedure/children_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package procedure_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/utils/unittest" -) - -// after indexing a block by its parent, it should be able to retrieve the child block by the parentID -func TestIndexAndLookupChild(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - parentID := unittest.IdentifierFixture() - childID := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(childID, parentID)) - require.NoError(t, err) - - // retrieve child - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(parentID, &retrievedIDs)) - require.NoError(t, err) - - // retrieved child should be the stored child - require.Equal(t, flow.IdentifierList{childID}, retrievedIDs) - }) -} - -// if two blocks connect to the same parent, indexing the second block would have -// no effect, retrieving the child of the parent block will return the first block that -// was indexed. -func TestIndexTwiceAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - parentID := unittest.IdentifierFixture() - child1ID := unittest.IdentifierFixture() - child2ID := unittest.IdentifierFixture() - - // index the first child - err := db.Update(procedure.IndexNewBlock(child1ID, parentID)) - require.NoError(t, err) - - // index the second child - err = db.Update(procedure.IndexNewBlock(child2ID, parentID)) - require.NoError(t, err) - - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(parentID, &retrievedIDs)) - require.NoError(t, err) - - require.Equal(t, flow.IdentifierList{child1ID, child2ID}, retrievedIDs) - }) -} - -// if parent is zero, then we don't index it -func TestIndexZeroParent(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - childID := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(childID, flow.ZeroID)) - require.NoError(t, err) - - // zero id should have no children - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(flow.ZeroID, &retrievedIDs)) - require.ErrorIs(t, err, storage.ErrNotFound) - }) -} - -// lookup block children will only return direct childrens -func TestDirectChildren(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - b1 := unittest.IdentifierFixture() - b2 := unittest.IdentifierFixture() - b3 := unittest.IdentifierFixture() - b4 := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(b2, b1)) - require.NoError(t, err) - - err = db.Update(procedure.IndexNewBlock(b3, b2)) - require.NoError(t, err) - - err = db.Update(procedure.IndexNewBlock(b4, b3)) - require.NoError(t, err) - - // check the children of the first block - var retrievedIDs flow.IdentifierList - - err = db.View(procedure.LookupBlockChildren(b1, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b2}, retrievedIDs) - - err = 
db.View(procedure.LookupBlockChildren(b2, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b3}, retrievedIDs) - - err = db.View(procedure.LookupBlockChildren(b3, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b4}, retrievedIDs) - - err = db.View(procedure.LookupBlockChildren(b4, &retrievedIDs)) - require.NoError(t, err) - require.Nil(t, retrievedIDs) - }) -} diff --git a/storage/badger/procedure/cluster.go b/storage/badger/procedure/cluster.go deleted file mode 100644 index d54e26be857..00000000000 --- a/storage/badger/procedure/cluster.go +++ /dev/null @@ -1,233 +0,0 @@ -package procedure - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// This file implements storage functions for blocks in cluster consensus. - -// InsertClusterBlock inserts a cluster consensus block, updating all -// associated indexes. -func InsertClusterBlock(block *cluster.Block) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // check payload integrity - if block.Header.PayloadHash != block.Payload.Hash() { - return fmt.Errorf("computed payload hash does not match header") - } - - // store the block header - blockID := block.ID() - err := operation.InsertHeader(blockID, block.Header)(tx) - if err != nil { - return fmt.Errorf("could not insert header: %w", err) - } - - // insert the block payload - err = InsertClusterPayload(blockID, block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not insert payload: %w", err) - } - - // index the child block for recovery - err = IndexNewBlock(blockID, block.Header.ParentID)(tx) - if err != nil { - return fmt.Errorf("could not index new block: %w", err) - } - return nil - } -} - -// RetrieveClusterBlock retrieves a cluster consensus block by block ID. -func RetrieveClusterBlock(blockID flow.Identifier, block *cluster.Block) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // retrieve the block header - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } - - // retrieve payload - var payload cluster.Payload - err = RetrieveClusterPayload(blockID, &payload)(tx) - if err != nil { - return fmt.Errorf("could not retrieve payload: %w", err) - } - - // overwrite block - *block = cluster.Block{ - Header: &header, - Payload: &payload, - } - - return nil - } -} - -// RetrieveLatestFinalizedClusterHeader retrieves the latest finalized for the -// given cluster chain ID. -func RetrieveLatestFinalizedClusterHeader(chainID flow.ChainID, final *flow.Header) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - var boundary uint64 - err := operation.RetrieveClusterFinalizedHeight(chainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } - - var finalID flow.Identifier - err = operation.LookupClusterBlockHeight(chainID, boundary, &finalID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve final ID: %w", err) - } - - err = operation.RetrieveHeader(finalID, final)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized header: %w", err) - } - - return nil - } -} - -// FinalizeClusterBlock finalizes a block in cluster consensus. 
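`RetrieveClusterBlock` and `RetrieveLatestFinalizedClusterHeader` above chain several reads (finalized height → block ID at that height → header) behind a single deferred functor. A sketch of how a caller might wrap the finalized-header lookup in one read-only transaction; the wrapper name and error wrapping are illustrative, the called functions are the ones removed in this diff:

```go
// latestFinalizedClusterHeader returns the highest finalized header of the
// given cluster chain, executed as a single read-only Badger transaction.
func latestFinalizedClusterHeader(db *badger.DB, chainID flow.ChainID) (*flow.Header, error) {
	var final flow.Header
	err := db.View(procedure.RetrieveLatestFinalizedClusterHeader(chainID, &final))
	if err != nil {
		return nil, fmt.Errorf("could not retrieve latest finalized cluster header for chain %v: %w", chainID, err)
	}
	return &final, nil
}
```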
-func FinalizeClusterBlock(blockID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // retrieve the header to check the parent - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } - - // get the chain ID, which determines which cluster state to query - chainID := header.ChainID - - // retrieve the current finalized state boundary - var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(chainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } - - // retrieve the ID of the boundary head - var headID flow.Identifier - err = operation.LookupClusterBlockHeight(chainID, boundary, &headID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve head: %w", err) - } - - // check that the head ID is the parent of the block we finalize - if header.ParentID != headID { - return fmt.Errorf("can't finalize non-child of chain head") - } - - // insert block view -> ID mapping - err = operation.IndexClusterBlockHeight(chainID, header.Height, header.ID())(tx) - if err != nil { - return fmt.Errorf("could not insert view->ID mapping: %w", err) - } - - // update the finalized boundary - err = operation.UpdateClusterFinalizedHeight(chainID, header.Height)(tx) - if err != nil { - return fmt.Errorf("could not update finalized boundary: %w", err) - } - - // NOTE: we don't want to prune forks that have become invalid here, so - // that we can keep validating entities and generating slashing - // challenges for some time - the pruning should happen some place else - // after a certain delay of blocks - - return nil - } -} - -// InsertClusterPayload inserts the payload for a cluster block. It inserts -// both the collection and all constituent transactions, allowing duplicates. -func InsertClusterPayload(blockID flow.Identifier, payload *cluster.Payload) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // cluster payloads only contain a single collection, allow duplicates, - // because it is valid for two competing forks to have the same payload. - light := payload.Collection.Light() - // SkipDuplicates here is to ignore the error if the collection already exists - // This means the Insert operation is actually a Upsert operation. - // The upsert is ok, because the data is unique by its ID - err := operation.SkipDuplicates(operation.InsertCollection(&light))(tx) - if err != nil { - return fmt.Errorf("could not insert payload collection: %w", err) - } - - // insert constituent transactions - for _, colTx := range payload.Collection.Transactions { - // SkipDuplicates here is to ignore the error if the collection already exists - // This means the Insert operation is actually a Upsert operation. - // The upsert is ok, because the data is unique by its ID - err = operation.SkipDuplicates(operation.InsertTransaction(colTx.ID(), colTx))(tx) - if err != nil { - return fmt.Errorf("could not insert payload transaction: %w", err) - } - } - - // index the transaction IDs within the collection - txIDs := payload.Collection.Light().Transactions - // SkipDuplicates here is to ignore the error if the collection already exists - // This means the Insert operation is actually a Upsert operation. 
- err = operation.SkipDuplicates(operation.IndexCollectionPayload(blockID, txIDs))(tx) - if err != nil { - return fmt.Errorf("could not index collection: %w", err) - } - - // insert the reference block ID - err = operation.IndexReferenceBlockByClusterBlock(blockID, payload.ReferenceBlockID)(tx) - if err != nil { - return fmt.Errorf("could not insert reference block ID: %w", err) - } - - return nil - } -} - -// RetrieveClusterPayload retrieves a cluster consensus block payload by block ID. -func RetrieveClusterPayload(blockID flow.Identifier, payload *cluster.Payload) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // lookup the reference block ID - var refID flow.Identifier - err := operation.LookupReferenceBlockByClusterBlock(blockID, &refID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve reference block ID: %w", err) - } - - // lookup collection transaction IDs - var txIDs []flow.Identifier - err = operation.LookupCollectionPayload(blockID, &txIDs)(tx) - if err != nil { - return fmt.Errorf("could not look up collection payload: %w", err) - } - - colTransactions := make([]*flow.TransactionBody, 0, len(txIDs)) - // retrieve individual transactions - for _, txID := range txIDs { - var nextTx flow.TransactionBody - err = operation.RetrieveTransaction(txID, &nextTx)(tx) - if err != nil { - return fmt.Errorf("could not retrieve transaction: %w", err) - } - colTransactions = append(colTransactions, &nextTx) - } - - *payload = cluster.PayloadFromTransactions(refID, colTransactions...) - - return nil - } -} diff --git a/storage/badger/procedure/cluster_test.go b/storage/badger/procedure/cluster_test.go deleted file mode 100644 index 325c7919454..00000000000 --- a/storage/badger/procedure/cluster_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package procedure - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertRetrieveClusterBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - block := unittest.ClusterBlockFixture() - - err := db.Update(InsertClusterBlock(&block)) - require.NoError(t, err) - - var retrieved cluster.Block - err = db.View(RetrieveClusterBlock(block.Header.ID(), &retrieved)) - require.NoError(t, err) - - require.Equal(t, block, retrieved) - }) -} - -func TestFinalizeClusterBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - parent := unittest.ClusterBlockFixture() - - block := unittest.ClusterBlockWithParent(&parent) - - err := db.Update(InsertClusterBlock(&block)) - require.NoError(t, err) - - err = db.Update(operation.IndexClusterBlockHeight(block.Header.ChainID, parent.Header.Height, parent.ID())) - require.NoError(t, err) - - err = db.Update(operation.InsertClusterFinalizedHeight(block.Header.ChainID, parent.Header.Height)) - require.NoError(t, err) - - err = db.Update(FinalizeClusterBlock(block.Header.ID())) - require.NoError(t, err) - - var boundary uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(block.Header.ChainID, &boundary)) - require.NoError(t, err) - require.Equal(t, block.Header.Height, boundary) - - var headID flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(block.Header.ChainID, boundary, &headID)) - require.NoError(t, err) - require.Equal(t, block.ID(), headID) - }) -} diff --git 
a/storage/badger/procedure/index.go b/storage/badger/procedure/index.go deleted file mode 100644 index 1416759f590..00000000000 --- a/storage/badger/procedure/index.go +++ /dev/null @@ -1,75 +0,0 @@ -package procedure - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" -) - -func InsertIndex(blockID flow.Identifier, index *flow.Index) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - err := operation.IndexPayloadGuarantees(blockID, index.CollectionIDs)(tx) - if err != nil { - return fmt.Errorf("could not store guarantee index: %w", err) - } - err = operation.IndexPayloadSeals(blockID, index.SealIDs)(tx) - if err != nil { - return fmt.Errorf("could not store seal index: %w", err) - } - err = operation.IndexPayloadReceipts(blockID, index.ReceiptIDs)(tx) - if err != nil { - return fmt.Errorf("could not store receipts index: %w", err) - } - err = operation.IndexPayloadResults(blockID, index.ResultIDs)(tx) - if err != nil { - return fmt.Errorf("could not store results index: %w", err) - } - err = operation.IndexPayloadProtocolStateID(blockID, index.ProtocolStateID)(tx) - if err != nil { - return fmt.Errorf("could not store protocol state id: %w", err) - } - return nil - } -} - -func RetrieveIndex(blockID flow.Identifier, index *flow.Index) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - var collIDs []flow.Identifier - err := operation.LookupPayloadGuarantees(blockID, &collIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve guarantee index: %w", err) - } - var sealIDs []flow.Identifier - err = operation.LookupPayloadSeals(blockID, &sealIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve seal index: %w", err) - } - var receiptIDs []flow.Identifier - err = operation.LookupPayloadReceipts(blockID, &receiptIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve receipts index: %w", err) - } - var resultsIDs []flow.Identifier - err = operation.LookupPayloadResults(blockID, &resultsIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve results index: %w", err) - } - var stateID flow.Identifier - err = operation.LookupPayloadProtocolStateID(blockID, &stateID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve protocol state id: %w", err) - } - - *index = flow.Index{ - CollectionIDs: collIDs, - SealIDs: sealIDs, - ReceiptIDs: receiptIDs, - ResultIDs: resultsIDs, - ProtocolStateID: stateID, - } - return nil - } -} diff --git a/storage/badger/procedure/index_test.go b/storage/badger/procedure/index_test.go deleted file mode 100644 index 77a3c32bc9b..00000000000 --- a/storage/badger/procedure/index_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package procedure - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertRetrieveIndex(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := unittest.IdentifierFixture() - index := unittest.IndexFixture() - - err := db.Update(InsertIndex(blockID, index)) - require.NoError(t, err) - - var retrieved flow.Index - err = db.View(RetrieveIndex(blockID, &retrieved)) - require.NoError(t, err) - - require.Equal(t, index, &retrieved) - }) -} diff --git a/storage/badger/protocol_kv_store.go b/storage/badger/protocol_kv_store.go deleted file mode 100644 index 9064f248a8b..00000000000 --- 
a/storage/badger/protocol_kv_store.go +++ /dev/null @@ -1,154 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// DefaultProtocolKVStoreCacheSize is the default size for primary protocol KV store cache. -// KV store is rarely updated, so we will have a limited number of unique snapshots. -// Let's be generous and assume we have 10 different KV stores used at the same time. -var DefaultProtocolKVStoreCacheSize uint = 10 - -// DefaultProtocolKVStoreByBlockIDCacheSize is the default value for secondary index `byBlockIdCache`. -// We want to be able to cover a broad interval of views without cache misses, so we use a bigger value. -// Generally, many blocks will reference the same KV store snapshot. -var DefaultProtocolKVStoreByBlockIDCacheSize uint = 1000 - -// ProtocolKVStore implements persistent storage for storing KV store snapshots. -type ProtocolKVStore struct { - db *badger.DB - - // cache holds versioned binary blobs representing snapshots of key-value stores. We use the kv-store's - // ID as key for retrieving the versioned binary snapshot of the kv-store. Consumers must know how to - // deal with the binary representation. `cache` only holds the distinct snapshots. On the happy path, - // we expect single-digit number of unique snapshots within an epoch. - cache *Cache[flow.Identifier, *flow.PSKeyValueStoreData] - - // byBlockIdCache is essentially an in-memory map from `Block.ID()` -> `KeyValueStore.ID()`. The full - // kv-store snapshot can be retrieved from the `cache` above. - // `byBlockIdCache` will contain an entry for every block. We want to be able to cover a broad interval of views - // without cache misses, so a cache size of roughly 1000 entries is reasonable. - byBlockIdCache *Cache[flow.Identifier, flow.Identifier] -} - -var _ storage.ProtocolKVStore = (*ProtocolKVStore)(nil) - -// NewProtocolKVStore creates a ProtocolKVStore instance, which is a database holding KV store snapshots. -// It supports storing, caching and retrieving by ID or the additionally indexed block ID. 
-func NewProtocolKVStore(collector module.CacheMetrics, - db *badger.DB, - kvStoreCacheSize uint, - kvStoreByBlockIDCacheSize uint, -) *ProtocolKVStore { - retrieveByStateID := func(stateID flow.Identifier) func(tx *badger.Txn) (*flow.PSKeyValueStoreData, error) { - return func(tx *badger.Txn) (*flow.PSKeyValueStoreData, error) { - var kvStore flow.PSKeyValueStoreData - err := operation.RetrieveProtocolKVStore(stateID, &kvStore)(tx) - if err != nil { - return nil, fmt.Errorf("could not get kv snapshot by id (%x): %w", stateID, err) - } - return &kvStore, nil - } - } - storeByStateID := func(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error { - return transaction.WithTx(operation.InsertProtocolKVStore(stateID, data)) - } - - storeByBlockID := func(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := transaction.WithTx(operation.IndexProtocolKVStore(blockID, stateID))(tx) - if err != nil { - return fmt.Errorf("could not index protocol state for block (%x): %w", blockID[:], err) - } - return nil - } - } - - retrieveByBlockID := func(blockID flow.Identifier) func(tx *badger.Txn) (flow.Identifier, error) { - return func(tx *badger.Txn) (flow.Identifier, error) { - var stateID flow.Identifier - err := operation.LookupProtocolKVStore(blockID, &stateID)(tx) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) - } - return stateID, nil - } - } - - return &ProtocolKVStore{ - db: db, - cache: newCache[flow.Identifier, *flow.PSKeyValueStoreData](collector, metrics.ResourceProtocolKVStore, - withLimit[flow.Identifier, *flow.PSKeyValueStoreData](kvStoreCacheSize), - withStore(storeByStateID), - withRetrieve(retrieveByStateID)), - byBlockIdCache: newCache[flow.Identifier, flow.Identifier](collector, metrics.ResourceProtocolKVStoreByBlockID, - withLimit[flow.Identifier, flow.Identifier](kvStoreByBlockIDCacheSize), - withStore(storeByBlockID), - withRetrieve(retrieveByBlockID)), - } -} - -// StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), -// which persists the given KV-store snapshot as part of a DB tx. -// Expected errors of the returned anonymous function: -// - storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored. -func (s *ProtocolKVStore) StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error { - return s.cache.PutTx(stateID, data) -} - -// IndexTx returns an anonymous function intended to be executed as part of a database transaction. -// In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the -// block that _proposes_ updated key-value store. -// Upon call, the anonymous function persists the specific map entry in the node's database. -// Protocol convention: -// - Consider block B, whose ingestion might potentially lead to an updated KV store. For example, -// the KV store changes if we seal some execution results emitting specific service events. -// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. -// - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, -// _after_ validating the QC. -// -// Expected errors during normal operations: -// - storage.ErrAlreadyExists if a KV store for the given blockID has already been indexed. 
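`StoreTx` and `IndexTx` only assemble deferred writes; callers are expected to execute both inside one atomic transaction so the snapshot and its by-block index never diverge. A sketch of that composition, assuming it lives in the same package as the store above (the helper name is hypothetical; the same pattern appears in the test further down in this diff):

```go
// persistAndIndexKVStore stores a KV-store snapshot and indexes it under the
// block proposing it, as a single atomic database transaction.
// Expected errors during normal operations:
//   - storage.ErrAlreadyExists if the snapshot or its block index was already written
func persistAndIndexKVStore(
	db *badger.DB,
	kvStore *ProtocolKVStore,
	blockID flow.Identifier,
	stateID flow.Identifier,
	data *flow.PSKeyValueStoreData,
) error {
	return transaction.Update(db, func(tx *transaction.Tx) error {
		if err := kvStore.StoreTx(stateID, data)(tx); err != nil {
			return fmt.Errorf("could not store KV-store snapshot %v: %w", stateID, err)
		}
		if err := kvStore.IndexTx(blockID, stateID)(tx); err != nil {
			return fmt.Errorf("could not index KV-store snapshot for block %v: %w", blockID, err)
		}
		return nil
	})
}
```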
-func (s *ProtocolKVStore) IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error { - return s.byBlockIdCache.PutTx(blockID, stateID) -} - -// ByID retrieves the KV store snapshot with the given ID. -// Expected errors during normal operations: -// - storage.ErrNotFound if no snapshot with the given Identifier is known. -func (s *ProtocolKVStore) ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - return s.cache.Get(id)(tx) -} - -// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes. -// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block, -// _after_ validating the QC. Protocol convention: -// - Consider block B, whose ingestion might potentially lead to an updated KV store state. -// For example, the state changes if we seal some execution results emitting specific service events. -// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value, -// the hash of the resulting state at the end of processing B is to be used. -// - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, -// _after_ validating the QC. -// -// Expected errors during normal operations: -// - storage.ErrNotFound if no snapshot has been indexed for the given block. -func (s *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - stateID, err := s.byBlockIdCache.Get(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) - } - return s.cache.Get(stateID)(tx) -} diff --git a/storage/badger/protocol_kv_store_test.go b/storage/badger/protocol_kv_store_test.go deleted file mode 100644 index d12fd05938b..00000000000 --- a/storage/badger/protocol_kv_store_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package badger - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -// TesKeyValueStoreStorage tests if the KV store is stored, retrieved and indexed correctly -func TestKeyValueStoreStorage(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - - expected := &flow.PSKeyValueStoreData{ - Version: 2, - Data: unittest.RandomBytes(32), - } - stateID := unittest.IdentifierFixture() - blockID := unittest.IdentifierFixture() - - // store protocol state and auxiliary info - err := transaction.Update(db, func(tx *transaction.Tx) error { - err := store.StoreTx(stateID, expected)(tx) - require.NoError(t, err) - return store.IndexTx(blockID, stateID)(tx) - }) - require.NoError(t, err) - - // fetch protocol state - actual, err := store.ByID(stateID) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // fetch protocol state by block ID - actualByBlockID, err := store.ByBlockID(blockID) - require.NoError(t, err) - assert.Equal(t, expected, actualByBlockID) - }) -} - -// TestProtocolKVStore_StoreTx tests 
that StoreTx returns an error if the KV-store snapshot with the given id is already stored. -func TestProtocolKVStore_StoreTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - - stateID := unittest.IdentifierFixture() - expected := &flow.PSKeyValueStoreData{ - Version: 2, - Data: unittest.RandomBytes(32), - } - - err := transaction.Update(db, store.StoreTx(stateID, expected)) - require.NoError(t, err) - - err = transaction.Update(db, store.StoreTx(stateID, expected)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - }) -} - -// TestProtocolKVStore_IndexTx tests that IndexTx returns an error if a KV store for the given blockID has already been indexed. -func TestProtocolKVStore_IndexTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - - stateID := unittest.IdentifierFixture() - blockID := unittest.IdentifierFixture() - - err := transaction.Update(db, store.IndexTx(blockID, stateID)) - require.NoError(t, err) - - err = transaction.Update(db, store.IndexTx(blockID, stateID)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - }) -} - -// TestProtocolKVStore_ByBlockID tests that ByBlockID returns an error if no snapshot has been indexed for the given block. -func TestProtocolKVStore_ByBlockID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - - blockID := unittest.IdentifierFixture() - _, err := store.ByBlockID(blockID) - require.ErrorIs(t, err, storage.ErrNotFound) - }) -} - -// TestProtocolKVStore_ByID tests that ByID returns an error if no snapshot with the given Identifier is known. -func TestProtocolKVStore_ByID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) - - stateID := unittest.IdentifierFixture() - _, err := store.ByID(stateID) - require.ErrorIs(t, err, storage.ErrNotFound) - }) -} diff --git a/storage/badger/qcs.go b/storage/badger/qcs.go deleted file mode 100644 index 856595184d4..00000000000 --- a/storage/badger/qcs.go +++ /dev/null @@ -1,64 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// QuorumCertificates implements persistent storage for quorum certificates. -type QuorumCertificates struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.QuorumCertificate] -} - -var _ storage.QuorumCertificates = (*QuorumCertificates)(nil) - -// NewQuorumCertificates Creates QuorumCertificates instance which is a database of quorum certificates -// which supports storing, caching and retrieving by block ID. 
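As with the other stores removed in this diff, writes go through the conflict-retrying transaction helper; the `StoreTx`/`ByBlockID` accessors follow below, and the tests after them exercise the same flow. A condensed sketch, assuming same-package access (the wrapper name is illustrative):

```go
// storeQC persists a quorum certificate keyed by the block it certifies,
// retrying on Badger transaction conflicts. Attempting to store a different
// QC for the same block yields storage.ErrAlreadyExists (first write wins).
func storeQC(db *badger.DB, qcs *QuorumCertificates, qc *flow.QuorumCertificate) error {
	err := operation.RetryOnConflictTx(db, transaction.Update, qcs.StoreTx(qc))
	if err != nil {
		return fmt.Errorf("could not store QC for block %v: %w", qc.BlockID, err)
	}
	return nil
}
```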
-func NewQuorumCertificates(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *QuorumCertificates { - store := func(_ flow.Identifier, qc *flow.QuorumCertificate) func(*transaction.Tx) error { - return transaction.WithTx(operation.InsertQuorumCertificate(qc)) - } - - retrieve := func(blockID flow.Identifier) func(tx *badger.Txn) (*flow.QuorumCertificate, error) { - return func(tx *badger.Txn) (*flow.QuorumCertificate, error) { - var qc flow.QuorumCertificate - err := operation.RetrieveQuorumCertificate(blockID, &qc)(tx) - return &qc, err - } - } - - return &QuorumCertificates{ - db: db, - cache: newCache[flow.Identifier, *flow.QuorumCertificate](collector, metrics.ResourceQC, - withLimit[flow.Identifier, *flow.QuorumCertificate](cacheSize), - withStore(store), - withRetrieve(retrieve)), - } -} - -func (q *QuorumCertificates) StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error { - return q.cache.PutTx(qc.BlockID, qc) -} - -func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) { - tx := q.db.NewTransaction(false) - defer tx.Discard() - return q.retrieveTx(blockID)(tx) -} - -func (q *QuorumCertificates) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.QuorumCertificate, error) { - return func(tx *badger.Txn) (*flow.QuorumCertificate, error) { - val, err := q.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} diff --git a/storage/badger/qcs_test.go b/storage/badger/qcs_test.go deleted file mode 100644 index 51cb0bc8a86..00000000000 --- a/storage/badger/qcs_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestQuorumCertificates_StoreTx tests storing and retrieving of QC. -func TestQuorumCertificates_StoreTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - qc := unittest.QuorumCertificateFixture() - - err := operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(qc)) - require.NoError(t, err) - - actual, err := store.ByBlockID(qc.BlockID) - require.NoError(t, err) - - require.Equal(t, qc, actual) - }) -} - -// TestQuorumCertificates_StoreTx_OtherQC checks if storing other QC for same blockID results in -// expected storage error and already stored value is not overwritten. 
-func TestQuorumCertificates_StoreTx_OtherQC(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - qc := unittest.QuorumCertificateFixture() - otherQC := unittest.QuorumCertificateFixture(func(otherQC *flow.QuorumCertificate) { - otherQC.View = qc.View - otherQC.BlockID = qc.BlockID - }) - - err := operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(qc)) - require.NoError(t, err) - - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(otherQC)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - - actual, err := store.ByBlockID(otherQC.BlockID) - require.NoError(t, err) - - require.Equal(t, qc, actual) - }) -} - -// TestQuorumCertificates_ByBlockID that ByBlockID returns correct sentinel error if no QC for given block ID has been found -func TestQuorumCertificates_ByBlockID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - - actual, err := store.ByBlockID(unittest.IdentifierFixture()) - require.ErrorIs(t, err, storage.ErrNotFound) - require.Nil(t, actual) - }) -} diff --git a/storage/badger/receipts.go b/storage/badger/receipts.go deleted file mode 100644 index 79edbd66b8c..00000000000 --- a/storage/badger/receipts.go +++ /dev/null @@ -1,133 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ExecutionReceipts implements storage for execution receipts. -type ExecutionReceipts struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.ExecutionReceipt] -} - -var _ storage.ExecutionReceipts = (*ExecutionReceipts)(nil) - -// NewExecutionReceipts Creates ExecutionReceipts instance which is a database of receipts which -// supports storing and indexing receipts by receipt ID and block ID. 
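Storing a receipt through the store defined in the remainder of this file also persists its embedded execution result and indexes the receipt under the executed block, so a read by block ID sees it immediately; storing an identical receipt twice is tolerated via `SkipDuplicates`. A small sketch under a same-package assumption (the helper name is hypothetical):

```go
// storeAndListReceipts persists a receipt and returns every receipt currently
// indexed for the block it executes. Storing the same receipt twice is a no-op.
func storeAndListReceipts(receipts *ExecutionReceipts, receipt *flow.ExecutionReceipt) (flow.ExecutionReceiptList, error) {
	if err := receipts.Store(receipt); err != nil {
		return nil, fmt.Errorf("could not store receipt %v: %w", receipt.ID(), err)
	}
	// the receipt is indexed by the block its result was computed for
	return receipts.ByBlockID(receipt.ExecutionResult.BlockID)
}
```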
-func NewExecutionReceipts(collector module.CacheMetrics, db *badger.DB, results *ExecutionResults, cacheSize uint) *ExecutionReceipts { - store := func(receiptID flow.Identifier, receipt *flow.ExecutionReceipt) func(*transaction.Tx) error { - // assemble DB operations to store result (no execution) - storeResultOps := results.store(&receipt.ExecutionResult) - // assemble DB operations to index receipt (no execution) - storeReceiptOps := transaction.WithTx(operation.SkipDuplicates(operation.InsertExecutionReceiptMeta(receiptID, receipt.Meta()))) - // assemble DB operations to index receipt by the block it computes (no execution) - indexReceiptOps := transaction.WithTx(operation.SkipDuplicates( - operation.IndexExecutionReceipts(receipt.ExecutionResult.BlockID, receiptID), - )) - - return func(tx *transaction.Tx) error { - err := storeResultOps(tx) // execute operations to store results - if err != nil { - return fmt.Errorf("could not store result: %w", err) - } - err = storeReceiptOps(tx) // execute operations to store receipt-specific meta-data - if err != nil { - return fmt.Errorf("could not store receipt metadata: %w", err) - } - err = indexReceiptOps(tx) - if err != nil { - return fmt.Errorf("could not index receipt by the block it computes: %w", err) - } - return nil - } - } - - retrieve := func(receiptID flow.Identifier) func(tx *badger.Txn) (*flow.ExecutionReceipt, error) { - return func(tx *badger.Txn) (*flow.ExecutionReceipt, error) { - var meta flow.ExecutionReceiptMeta - err := operation.RetrieveExecutionReceiptMeta(receiptID, &meta)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve receipt meta: %w", err) - } - result, err := results.byID(meta.ResultID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve result: %w", err) - } - return flow.ExecutionReceiptFromMeta(meta, *result), nil - } - } - - return &ExecutionReceipts{ - db: db, - cache: newCache(collector, metrics.ResourceReceipt, - withLimit[flow.Identifier, *flow.ExecutionReceipt](cacheSize), - withStore(store), - withRetrieve(retrieve)), - } -} - -// storeMyReceipt assembles the operations to store an arbitrary receipt. 
-func (r *ExecutionReceipts) storeTx(receipt *flow.ExecutionReceipt) func(*transaction.Tx) error { - return r.cache.PutTx(receipt.ID(), receipt) -} - -func (r *ExecutionReceipts) byID(receiptID flow.Identifier) func(*badger.Txn) (*flow.ExecutionReceipt, error) { - retrievalOps := r.cache.Get(receiptID) // assemble DB operations to retrieve receipt (no execution) - return func(tx *badger.Txn) (*flow.ExecutionReceipt, error) { - val, err := retrievalOps(tx) // execute operations to retrieve receipt - if err != nil { - return nil, err - } - return val, nil - } -} - -func (r *ExecutionReceipts) byBlockID(blockID flow.Identifier) func(*badger.Txn) ([]*flow.ExecutionReceipt, error) { - return func(tx *badger.Txn) ([]*flow.ExecutionReceipt, error) { - var receiptIDs []flow.Identifier - err := operation.LookupExecutionReceipts(blockID, &receiptIDs)(tx) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, fmt.Errorf("could not find receipt index for block: %w", err) - } - - var receipts []*flow.ExecutionReceipt - for _, id := range receiptIDs { - receipt, err := r.byID(id)(tx) - if err != nil { - return nil, fmt.Errorf("could not find receipt with id %v: %w", id, err) - } - receipts = append(receipts, receipt) - } - return receipts, nil - } -} - -func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.storeTx(receipt)) -} - -func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error { - return fmt.Errorf("not implemented") -} - -func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(receiptID)(tx) -} - -func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byBlockID(blockID)(tx) -} diff --git a/storage/badger/receipts_test.go b/storage/badger/receipts_test.go deleted file mode 100644 index 03b8420258e..00000000000 --- a/storage/badger/receipts_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestExecutionReceiptsStorage(t *testing.T) { - withStore := func(t *testing.T, f func(store *bstorage.ExecutionReceipts)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - results := bstorage.NewExecutionResults(metrics, db) - store := bstorage.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - f(store) - }) - } - - t.Run("get empty", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - receipts, err := store.ByBlockID(block.ID()) - require.NoError(t, err) - require.Equal(t, 0, len(receipts)) - }) - }) - - t.Run("store one get one", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - receipt1 := unittest.ReceiptForBlockFixture(&block) - - err := store.Store(receipt1) - require.NoError(t, err) - - actual, err := store.ByID(receipt1.ID()) - require.NoError(t, err) - - require.Equal(t, receipt1, actual) - - receipts, err := store.ByBlockID(block.ID()) 
- require.NoError(t, err) - - require.Equal(t, flow.ExecutionReceiptList{receipt1}, receipts) - }) - }) - - t.Run("store two for the same block", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - executor2 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block, executor2) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, receipts) - }) - }) - - t.Run("store two for different blocks", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - block2 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - executor2 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block2, executor2) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts1, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - receipts2, err := store.ByBlockID(block2.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts1) - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt2}, receipts2) - }) - }) - - t.Run("indexing duplicated receipts should be ok", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt1) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts) - }) - }) - - t.Run("indexing receipt from the same executor for same block should succeed", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, receipts) - }) - }) -} diff --git a/storage/badger/results.go b/storage/badger/results.go deleted file mode 100644 index 35872f4bc17..00000000000 --- a/storage/badger/results.go +++ /dev/null @@ -1,163 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ExecutionResults implements persistent storage for execution results. 
-type ExecutionResults struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.ExecutionResult] -} - -var _ storage.ExecutionResults = (*ExecutionResults)(nil) - -func NewExecutionResults(collector module.CacheMetrics, db *badger.DB) *ExecutionResults { - - store := func(_ flow.Identifier, result *flow.ExecutionResult) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertExecutionResult(result))) - } - - retrieve := func(resultID flow.Identifier) func(tx *badger.Txn) (*flow.ExecutionResult, error) { - return func(tx *badger.Txn) (*flow.ExecutionResult, error) { - var result flow.ExecutionResult - err := operation.RetrieveExecutionResult(resultID, &result)(tx) - return &result, err - } - } - - res := &ExecutionResults{ - db: db, - cache: newCache(collector, metrics.ResourceResult, - withLimit[flow.Identifier, *flow.ExecutionResult](flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return res -} - -func (r *ExecutionResults) store(result *flow.ExecutionResult) func(*transaction.Tx) error { - return r.cache.PutTx(result.ID(), result) -} - -func (r *ExecutionResults) byID(resultID flow.Identifier) func(*badger.Txn) (*flow.ExecutionResult, error) { - return func(tx *badger.Txn) (*flow.ExecutionResult, error) { - val, err := r.cache.Get(resultID)(tx) - if err != nil { - return nil, err - } - return val, nil - } -} - -func (r *ExecutionResults) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.ExecutionResult, error) { - return func(tx *badger.Txn) (*flow.ExecutionResult, error) { - var resultID flow.Identifier - err := operation.LookupExecutionResult(blockID, &resultID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup execution result ID: %w", err) - } - return r.byID(resultID)(tx) - } -} - -func (r *ExecutionResults) index(blockID, resultID flow.Identifier, force bool) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := transaction.WithTx(operation.IndexExecutionResult(blockID, resultID))(tx) - if err == nil { - return nil - } - - if !errors.Is(err, storage.ErrAlreadyExists) { - return err - } - - if force { - return transaction.WithTx(operation.ReindexExecutionResult(blockID, resultID))(tx) - } - - // when trying to index a result for a block, and there is already a result indexed for this block, - // double check if the indexed result is the same - var storedResultID flow.Identifier - err = transaction.WithTx(operation.LookupExecutionResult(blockID, &storedResultID))(tx) - if err != nil { - return fmt.Errorf("there is a result stored already, but cannot retrieve it: %w", err) - } - - if storedResultID != resultID { - return fmt.Errorf("storing result that is different from the already stored one for block: %v, storing result: %v, stored result: %v. 
%w", - blockID, resultID, storedResultID, storage.ErrDataMismatch) - } - - return nil - } -} - -func (r *ExecutionResults) Store(result *flow.ExecutionResult) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.store(result)) -} - -func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error { - return fmt.Errorf("not implemented") -} - -func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { - return fmt.Errorf("not implemented") -} - -func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(resultID)(tx) -} - -func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - return func(tx *transaction.Tx) (*flow.ExecutionResult, error) { - result, err := r.byID(resultID)(tx.DBTxn) - return result, err - } -} - -func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { - err := operation.RetryOnConflictTx(r.db, transaction.Update, r.index(blockID, resultID, false)) - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - -func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { - err := operation.RetryOnConflictTx(r.db, transaction.Update, r.index(blockID, resultID, true)) - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - -func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byBlockID(blockID)(tx) -} - -func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { - return r.db.Update(operation.SkipNonExist(operation.RemoveExecutionResultIndex(blockID))) -} - -// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
-func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { - return fmt.Errorf("not implemented") -} diff --git a/storage/badger/results_test.go b/storage/badger/results_test.go deleted file mode 100644 index 1c8f4386cbe..00000000000 --- a/storage/badger/results_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestResultStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, result, actual) - }) -} - -func TestResultStoreTwice(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - - err = store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - }) -} - -func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result1 := unittest.ExecutionResultFixture() - result2 := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result1) - require.NoError(t, err) - - err = store.Index(blockID, result1.ID()) - require.NoError(t, err) - - // we can store a different result, but we can't index - // a different result for that block, because it will mean - // one block has two different results. 
- err = store.Store(result2) - require.NoError(t, err) - - err = store.Index(blockID, result2.ID()) - require.ErrorIs(t, err, storage.ErrDataMismatch) - }) -} - -func TestResultStoreForceIndexOverridesMapping(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result1 := unittest.ExecutionResultFixture() - result2 := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result1) - require.NoError(t, err) - err = store.Index(blockID, result1.ID()) - require.NoError(t, err) - - err = store.Store(result2) - require.NoError(t, err) - - // force index - err = store.ForceIndex(blockID, result2.ID()) - require.NoError(t, err) - - // retrieve index to make sure it points to second ER now - byBlockID, err := store.ByBlockID(blockID) - - require.Equal(t, result2, byBlockID) - require.NoError(t, err) - }) -} diff --git a/storage/badger/transaction/deferred_block_persist.go b/storage/badger/transaction/deferred_block_persist.go deleted file mode 100644 index 0d3aadf6c95..00000000000 --- a/storage/badger/transaction/deferred_block_persist.go +++ /dev/null @@ -1,270 +0,0 @@ -package transaction - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// DeferredBlockPersistOp is a shorthand notation for an anonymous function that takes the ID of -// a fully constructed block and a `transaction.Tx` as inputs and runs some database operations -// as part of that transaction. It is a "Promise Pattern", essentially saying: -// once we have the completed the block's construction, we persist data structures that are -// referenced by the block or populate database indices. This pattern is necessary, because -// internally to the protocol_state package we don't have access to the candidate block ID yet because -// we are still determining the protocol state ID for that block. -type DeferredBlockPersistOp func(blockID flow.Identifier, tx *Tx) error - -// noOpPersist intended as constant -var noOpPersist DeferredBlockPersistOp = func(blockID flow.Identifier, tx *Tx) error { return nil } - -// WithBlock adds the still missing block ID information to a `DeferredBlockPersistOp`, thereby converting -// it into a `transaction.DeferredDBUpdate`. -func (d DeferredBlockPersistOp) WithBlock(blockID flow.Identifier) DeferredDBUpdate { - return func(tx *Tx) error { - return d(blockID, tx) - } -} - -// DeferredBlockPersist is a utility for accumulating deferred database interactions that -// are supposed to be executed in one atomic transaction. It supports: -// - Deferred database operations that work directly on Badger transactions. -// - Deferred database operations that work on `transaction.Tx`. -// Tx is a storage-layer abstraction, with support for callbacks that are executed -// after the underlying database transaction completed _successfully_. -// - Deferred database operations that depend on the ID of the block under construction -// and `transaction.Tx`. Especially useful for populating `ByBlockID` indices. -// -// ORDER OF EXECUTION -// We extend the process in which `transaction.Tx` executes database operations, schedules -// callbacks, and executed the callbacks. Specifically, DeferredDbOps proceeds as follows: -// -// 0. Record functors added via `AddBadgerOp`, `AddDbOp`, `OnSucceed` ... -// • some functors may schedule callbacks (depending on their type), which are executed -// after the underlying database transaction completed _successfully_. 
-// • `OnSucceed` is treated exactly the same way: -// it schedules a callback during its execution, but it has no database actions. -// 1. Execute the functors in the order they were added -// 2. During each functor's execution: -// • some functors may schedule callbacks (depending on their type) -// • record those callbacks in the order they are scheduled (no execution yet) -// 3. If and only if the underlying database transaction succeeds, run the callbacks -// -// DESIGN PATTERN -// - DeferredDbOps is stateful, i.e. it needs to be passed as pointer variable. -// - Do not instantiate Tx directly. Instead, use one of the following -// transaction.Update(db, DeferredDbOps.Pending().WithBlock(blockID)) -// transaction.View(db, DeferredDbOps.Pending().WithBlock(blockID)) -// operation.RetryOnConflictTx(db, transaction.Update, DeferredDbOps.Pending().WithBlock(blockID)) -// -// NOT CONCURRENCY SAFE -type DeferredBlockPersist struct { - isEmpty bool - pending DeferredBlockPersistOp -} - -// NewDeferredBlockPersist instantiates a DeferredBlockPersist. Initially, it behaves like a no-op until functors are added. -func NewDeferredBlockPersist() *DeferredBlockPersist { - return &DeferredBlockPersist{ - isEmpty: true, - pending: noOpPersist, // initially nothing is pending, i.e. no-op - } -} - -// IsEmpty returns true if and only if there are zero pending database operations. -func (d *DeferredBlockPersist) IsEmpty() bool { - if d == nil { - return true - } - return d.isEmpty -} - -// Pending returns a DeferredBlockPersistOp that comprises all database operations and callbacks -// that were added so far. Caution, DeferredBlockPersist keeps its internal state of deferred operations. -// Pending() can be called multiple times, but should only be executed in a database transaction -// once to avoid conflicts. -func (d *DeferredBlockPersist) Pending() DeferredBlockPersistOp { - if d == nil { - return noOpPersist - } - return d.pending -} - -// AddBadgerOp schedules the given DeferredBadgerUpdate to be executed as part of the future transaction. -// For adding multiple DeferredBadgerUpdates, use `AddBadgerOps(ops ...DeferredBadgerUpdate)` if easily possible, as -// it reduces the call stack compared to adding the functors individually via `AddBadgerOp(op DeferredBadgerUpdate)`. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddBadgerOp(op DeferredBadgerUpdate) *DeferredBlockPersist { - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - err = op(tx.DBTxn) - if err != nil { - return err - } - return nil - } - d.isEmpty = false - return d -} - -// AddBadgerOps schedules the given DeferredBadgerUpdates to be executed as part of the future transaction. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddBadgerOps(ops ...DeferredBadgerUpdate) *DeferredBlockPersist { - if len(ops) < 1 { - return d - } - - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - for _, op := range ops { - err = op(tx.DBTxn) - if err != nil { - return err - } - } - return nil - } - d.isEmpty = false - return d -} - -// AddDbOp schedules the given DeferredDBUpdate to be executed as part of the future transaction. 
-// For adding multiple DeferredBadgerUpdates, use `AddDbOps(ops ...DeferredDBUpdate)` if easily possible, as -// it reduces the call stack compared to adding the functors individually via `AddDbOp(op DeferredDBUpdate)`. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddDbOp(op DeferredDBUpdate) *DeferredBlockPersist { - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - err = op(tx) - if err != nil { - return err - } - return nil - } - d.isEmpty = false - return d -} - -// AddDbOps schedules the given DeferredDBUpdates to be executed as part of the future transaction. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddDbOps(ops ...DeferredDBUpdate) *DeferredBlockPersist { - if len(ops) < 1 { - return d - } - - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - for _, op := range ops { - err = op(tx) - if err != nil { - return err - } - } - return nil - } - d.isEmpty = false - return d -} - -// AddIndexingOp schedules the given DeferredBlockPersistOp to be executed as part of the future transaction. -// Usually, these are operations to populate some `ByBlockID` index. -// For adding multiple DeferredBlockPersistOps, use `AddIndexingOps(ops ...DeferredBlockPersistOp)` if easily -// possible, as it reduces the call stack compared to adding the functors individually via -// `AddIndexOp(op DeferredBlockPersistOp)`. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddIndexingOp(op DeferredBlockPersistOp) *DeferredBlockPersist { - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - err = op(blockID, tx) - if err != nil { - return err - } - return nil - } - d.isEmpty = false - return d -} - -// AddIndexingOps schedules the given DeferredBlockPersistOp to be executed as part of the future transaction. -// Usually, these are operations to populate some `ByBlockID` index. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) AddIndexingOps(ops ...DeferredBlockPersistOp) *DeferredBlockPersist { - if len(ops) < 1 { - return d - } - - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - for _, op := range ops { - err = op(blockID, tx) - if err != nil { - return err - } - } - return nil - } - d.isEmpty = false - return d -} - -// OnSucceed adds a callback to be executed after the deferred database operations have succeeded. For -// adding multiple callbacks, use `OnSucceeds(callbacks ...func())` if easily possible, as it reduces -// the call stack compared to adding the functors individually via `OnSucceed(callback func())`. -// This method returns a self-reference for chaining. -func (d *DeferredBlockPersist) OnSucceed(callback func()) *DeferredBlockPersist { - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - tx.OnSucceed(callback) - return nil - } - d.isEmpty = false - return d -} - -// OnSucceeds adds callbacks to be executed after the deferred database operations have succeeded. -// This method returns a self-reference for chaining. 
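Putting the accumulator above to work: operations are queued while the block is still under construction, and the block ID is only bound when the pending functor is finally executed. A compact sketch from outside the package, assuming the `DeferredBadgerUpdate` and `DeferredBlockPersistOp` signatures implied by the code above; the parameter operations are placeholders (see also the tests that follow):

```go
// buildDeferredPersist queues database writes for a block that is still under
// construction; the block ID is only supplied later via WithBlock.
func buildDeferredPersist(
	storeHeaderOp func(*badger.Txn) error,
	indexStateID func(blockID flow.Identifier, tx *transaction.Tx) error,
	onCommitted func(),
) *transaction.DeferredBlockPersist {
	return transaction.NewDeferredBlockPersist().
		AddBadgerOp(storeHeaderOp).  // plain Badger-level write
		AddIndexingOp(indexStateID). // populates a ByBlockID index, needs the ID
		OnSucceed(onCommitted)       // runs only if the transaction commits
}

// Once the candidate block is fully constructed and its ID is known:
//   err := transaction.Update(db, deferred.Pending().WithBlock(blockID))
```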
-func (d *DeferredBlockPersist) OnSucceeds(callbacks ...func()) *DeferredBlockPersist { - if len(callbacks) < 1 { - return d - } - - prior := d.pending - d.pending = func(blockID flow.Identifier, tx *Tx) error { - err := prior(blockID, tx) - if err != nil { - return err - } - for _, c := range callbacks { - tx.OnSucceed(c) - } - return nil - } - d.isEmpty = false - return d -} diff --git a/storage/badger/transaction/deferred_block_persist_test.go b/storage/badger/transaction/deferred_block_persist_test.go deleted file mode 100644 index 5f2f476f41d..00000000000 --- a/storage/badger/transaction/deferred_block_persist_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package transaction_test - -import ( - "fmt" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestDeferredBlockPersist(t *testing.T) { - suite.Run(t, new(DeferredBlockPersistSuite)) -} - -type DeferredBlockPersistSuite struct { - suite.Suite -} - -// TestEmpty verifies that DeferredBlockPersist behaves like a no-op if nothing is scheduled -func (s *DeferredBlockPersistSuite) TestEmpty() { - deferredPersistOps := transaction.NewDeferredBlockPersist() - require.True(s.T(), deferredPersistOps.IsEmpty()) - - // NewDeferredBlockPersist.Pending() should be a no-op and therefore not care that transaction.Tx is nil - err := deferredPersistOps.Pending()(unittest.IdentifierFixture(), nil) - require.NoError(s.T(), err) -} - -// Test_AddBadgerOp adds 1 or 2 DeferredBadgerUpdate(s) and verifies that they are executed in the expected order -func (s *DeferredBlockPersistSuite) Test_AddBadgerOp() { - blockID := unittest.IdentifierFixture() - unittest.RunWithBadgerDB(s.T(), func(db *badger.DB) { - s.Run("single DeferredBadgerUpdate", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist().AddBadgerOp(m.MakeBadgerUpdate()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredBadgerUpdates added individually", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddBadgerOp(m.MakeBadgerUpdate()). - AddBadgerOp(m.MakeBadgerUpdate()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredBadgerUpdates added as a sequence", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist().AddBadgerOps( - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - }) -} - -// TestDbOp adds 1 or 2 DeferredDBUpdate(s) and verifies that they are executed in the expected order -func (s *DeferredBlockPersistSuite) Test_AddDbOp() { - blockID := unittest.IdentifierFixture() - unittest.RunWithBadgerDB(s.T(), func(db *badger.DB) { - s.Run("single DeferredDBUpdate without callback", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). 
- AddDbOp(m.MakeDBUpdate(0)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("single DeferredDBUpdate with one callback", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddDbOp(m.MakeDBUpdate(1)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("single DeferredDBUpdate with multiple callbacks", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddDbOp(m.MakeDBUpdate(21)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredDBUpdates added individually", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddDbOp(m.MakeDBUpdate(17)). - AddDbOp(m.MakeDBUpdate(0)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredDBUpdates added as a sequence", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist() - deferredPersistOps.AddDbOps( - m.MakeDBUpdate(0), - m.MakeDBUpdate(17)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - }) -} - -// Test_AddIndexingOp adds 1 or 2 DeferredBlockPersistOp(s) and verifies that they are executed in the expected order -func (s *DeferredBlockPersistSuite) Test_AddIndexingOp() { - blockID := unittest.IdentifierFixture() - unittest.RunWithBadgerDB(s.T(), func(db *badger.DB) { - s.Run("single DeferredBlockPersistOp without callback", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddIndexingOp(m.MakeIndexingOp(blockID, 0)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("single DeferredBlockPersistOp with one callback", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddIndexingOp(m.MakeIndexingOp(blockID, 1)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("single DeferredBlockPersistOp with multiple callbacks", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddIndexingOp(m.MakeIndexingOp(blockID, 21)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredBlockPersistOp added individually", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - AddIndexingOp(m.MakeIndexingOp(blockID, 17)). 
- AddIndexingOp(m.MakeIndexingOp(blockID, 0)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two DeferredBlockPersistOp added as a sequence", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist() - deferredPersistOps.AddIndexingOps( - m.MakeIndexingOp(blockID, 0), - m.MakeIndexingOp(blockID, 17)) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - }) -} - -// Test_AddOnSucceedCallback adds 1 or 2 callback(s) and verifies that they are executed in the expected order -func (s *DeferredBlockPersistSuite) Test_AddOnSucceedCallback() { - blockID := unittest.IdentifierFixture() - unittest.RunWithBadgerDB(s.T(), func(db *badger.DB) { - s.Run("single callback", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - OnSucceed(m.MakeCallback()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("two callbacks added individually", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - OnSucceed(m.MakeCallback()). - OnSucceed(m.MakeCallback()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - - s.Run("many callbacks added as a sequence", func() { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - OnSucceeds(m.MakeCallbacks(11)...) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) - }) -} - -// Test_EverythingMixed uses all ways to add functors in combination and verifies that they are executed in the expected order -func (s *DeferredBlockPersistSuite) Test_EverythingMixed() { - blockID := unittest.IdentifierFixture() - unittest.RunWithBadgerDB(s.T(), func(db *badger.DB) { - m := NewBlockPersistCallMonitor(s.T()) - deferredPersistOps := transaction.NewDeferredBlockPersist(). - OnSucceed(m.MakeCallback()). - AddDbOp(m.MakeDBUpdate(1)). - AddBadgerOp(m.MakeBadgerUpdate()). - AddIndexingOp(m.MakeIndexingOp(blockID, 2)). - OnSucceeds(m.MakeCallbacks(3)...). - AddDbOp(m.MakeDBUpdate(0)). - AddBadgerOps( - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate()). - AddIndexingOps( - m.MakeIndexingOp(blockID, 7), - m.MakeIndexingOp(blockID, 0)). - OnSucceeds( - m.MakeCallback(), - m.MakeCallback()). - AddDbOps( - m.MakeDBUpdate(7), - m.MakeDBUpdate(0), - m.MakeDBUpdate(1)). - OnSucceed(m.MakeCallback()) - require.False(s.T(), deferredPersistOps.IsEmpty()) - err := transaction.Update(db, deferredPersistOps.Pending().WithBlock(blockID)) - require.NoError(s.T(), err) - }) -} - -/* ***************************************** Testing Utility BlockPersistCallMonitor ***************************************** */ - -// BlockPersistCallMonitor is a utility for testing that DeferredBlockPersist calls its input functors and callbacks -// in the correct order. DeferredBlockPersist is expected to proceed as follows: -// -// 0. 
Record functors added via `AddBadgerOp`, `AddDbOp`, `AddIndexingOp`, `OnSucceed` ... -// 1. Execute the functors in the order they were added -// 2. During each functor's execution: -// - some functor's may schedule callbacks (depending on their type) -// - record those callbacks in the order they are scheduled (no execution yet) -// `OnSucceed` schedules its callback during its execution at this step as well -// 3. If and only if the underlying database transaction _successfully_ completed, run the callbacks -// -// To verify the correct order of calls, the BlockPersistCallMonitor generates functors. Each functor has a -// dedicated index value. When the functor is called, it checks that its index matches the functor index -// that the BlockPersistCallMonitor expects to be executed next. For callbacks, we proceed analogously. -// -// Usage note: -// The call BlockPersistCallMonitor assumes that functors are added to DeferredBlockPersist exactly in the order that -// BlockPersistCallMonitor generates them. This works very intuitively, when the tests proceed as in the following example: -// -// m := NewBlockPersistCallMonitor(t) -// deferredPersistOps := transaction.NewDeferredBlockPersist() -// deferredPersistOps.AddBadgerOp(m.MakeBadgerUpdate()) // here, we add the functor right when it is generated -// transaction.Update(db, deferredPersistOps.Pending()) -type BlockPersistCallMonitor struct { - generatedTxFunctors int - generatedCallbacks int - - T *testing.T - nextExpectedTxFunctorIdx int - nextExpectedCallbackIdx int -} - -func NewBlockPersistCallMonitor(t *testing.T) *BlockPersistCallMonitor { - return &BlockPersistCallMonitor{T: t} -} - -func (cm *BlockPersistCallMonitor) MakeIndexingOp(expectedBlockID flow.Identifier, withCallbacks int) transaction.DeferredBlockPersistOp { - myFunctorIdx := cm.generatedTxFunctors // copy into local scope. Determined when we construct functor - callbacks := cm.MakeCallbacks(withCallbacks) // pre-generate callback functors - functor := func(blockID flow.Identifier, tx *transaction.Tx) error { - if expectedBlockID != blockID { - cm.T.Errorf("expected block ID %v but got %v", expectedBlockID, blockID) - return fmt.Errorf("expected block ID %v but got %v", expectedBlockID, blockID) - } - for _, c := range callbacks { - tx.OnSucceed(c) // schedule callback - } - if cm.nextExpectedTxFunctorIdx != myFunctorIdx { - // nextExpectedTxFunctorIdx holds the Index of the Functor that was generated next. DeferredBlockPersist - // should execute the functors in the order they were added, which is violated. Hence, we fail: - cm.T.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - return fmt.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - } - - // happy path: - cm.nextExpectedTxFunctorIdx += 1 - return nil - } - - cm.generatedTxFunctors += 1 - return functor -} - -func (cm *BlockPersistCallMonitor) MakeDBUpdate(withCallbacks int) transaction.DeferredDBUpdate { - myFunctorIdx := cm.generatedTxFunctors // copy into local scope. Determined when we construct functor - callbacks := cm.MakeCallbacks(withCallbacks) // pre-generate callback functors - functor := func(tx *transaction.Tx) error { - for _, c := range callbacks { - tx.OnSucceed(c) // schedule callback - } - if cm.nextExpectedTxFunctorIdx != myFunctorIdx { - // nextExpectedTxFunctorIdx holds the Index of the Functor that was generated next. 
DeferredBlockPersist - // should execute the functors in the order they were added, which is violated. Hence, we fail: - cm.T.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - return fmt.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - } - - // happy path: - cm.nextExpectedTxFunctorIdx += 1 - return nil - } - - cm.generatedTxFunctors += 1 - return functor -} - -func (cm *BlockPersistCallMonitor) MakeBadgerUpdate() transaction.DeferredBadgerUpdate { - myFunctorIdx := cm.generatedTxFunctors // copy into local scope. Determined when we construct functor - functor := func(tx *badger.Txn) error { - if cm.nextExpectedTxFunctorIdx != myFunctorIdx { - // nextExpectedTxFunctorIdx holds the Index of the Functor that was generated next. DeferredBlockPersist - // should execute the functors in the order they were added, which is violated. Hence, we fail: - cm.T.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - return fmt.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - } - - // happy path: - cm.nextExpectedTxFunctorIdx += 1 - return nil - } - - cm.generatedTxFunctors += 1 - return functor -} - -func (cm *BlockPersistCallMonitor) MakeCallback() func() { - myFunctorIdx := cm.generatedCallbacks // copy into local scope. Determined when we construct callback - functor := func() { - if cm.nextExpectedCallbackIdx != myFunctorIdx { - // nextExpectedCallbackIdx holds the Index of the callback that was generated next. DeferredBlockPersist - // should execute the callback in the order they were scheduled, which is violated. Hence, we fail: - cm.T.Errorf("expected next Callback Index is %d but my value is %d", cm.nextExpectedCallbackIdx, myFunctorIdx) - } - cm.nextExpectedCallbackIdx += 1 // happy path - } - - cm.generatedCallbacks += 1 - return functor -} - -func (cm *BlockPersistCallMonitor) MakeCallbacks(numberCallbacks int) []func() { - callbacks := make([]func(), 0, numberCallbacks) - for ; 0 < numberCallbacks; numberCallbacks-- { - callbacks = append(callbacks, cm.MakeCallback()) - } - return callbacks -} diff --git a/storage/badger/transaction/deferred_update.go b/storage/badger/transaction/deferred_update.go deleted file mode 100644 index 26ad2cc8e89..00000000000 --- a/storage/badger/transaction/deferred_update.go +++ /dev/null @@ -1,188 +0,0 @@ -package transaction - -import ( - "github.com/dgraph-io/badger/v2" -) - -// DeferredDBUpdate is a shorthand notation for an anonymous function that takes -// a `transaction.Tx` as input and runs some database operations as part of that transaction. -type DeferredDBUpdate func(*Tx) error - -// DeferredBadgerUpdate is a shorthand notation for an anonymous function that takes -// a badger transaction as input and runs some database operations as part of that transaction. -type DeferredBadgerUpdate = func(*badger.Txn) error - -// DeferredDbOps is a utility for accumulating deferred database interactions that -// are supposed to be executed in one atomic transaction. It supports: -// - Deferred database operations that work directly on Badger transactions. -// - Deferred database operations that work on `transaction.Tx`. -// Tx is a storage-layer abstraction, with support for callbacks that are executed -// after the underlying database transaction completed _successfully_. 
-// -// ORDER OF EXECUTION -// We extend the process in which `transaction.Tx` executes database operations, schedules -// callbacks, and executed the callbacks. Specifically, DeferredDbOps proceeds as follows: -// -// 0. Record functors added via `AddBadgerOp`, `AddDbOp`, `OnSucceed` ... -// • some functor's may schedule callbacks (depending on their type), which are executed -// after the underlying database transaction completed _successfully_. -// • `OnSucceed` is treated exactly the same way: -// it schedules a callback during its execution, but it has no database actions. -// 1. Execute the functors in the order they were added -// 2. During each functor's execution: -// • some functor's may schedule callbacks (depending on their type) -// • record those callbacks in the order they are scheduled (no execution yet) -// 3. If and only if the underlying database transaction succeeds, run the callbacks -// -// DESIGN PATTERN -// - DeferredDbOps is stateful, i.e. it needs to be passed as pointer variable. -// - Do not instantiate Tx directly. Instead, use one of the following -// transaction.Update(db, DeferredDbOps.Pending()) -// transaction.View(db, DeferredDbOps.Pending()) -// operation.RetryOnConflictTx(db, transaction.Update, DeferredDbOps.Pending()) -// -// NOT CONCURRENCY SAFE -type DeferredDbOps struct { - pending DeferredDBUpdate -} - -// NewDeferredDbOps instantiates a DeferredDbOps. Initially, it behaves like a no-op until functors are added. -func NewDeferredDbOps() *DeferredDbOps { - return &DeferredDbOps{ - pending: func(tx *Tx) error { return nil }, // initially nothing is pending, i.e. no-op - } -} - -// Pending returns a DeferredDBUpdate that includes all database operations and callbacks -// that were added so far. Caution, DeferredDbOps keeps its internal state of deferred operations. -// Pending() can be called multiple times, but should only be executed in a database transaction -// once to avoid conflicts. -func (d *DeferredDbOps) Pending() DeferredDBUpdate { - return d.pending -} - -// AddBadgerOp schedules the given DeferredBadgerUpdate to be executed as part of the future transaction. -// For adding multiple DeferredBadgerUpdates, use `AddBadgerOps(ops ...DeferredBadgerUpdate)` if easily possible, as -// it reduces the call stack compared to adding the functors individually via `AddBadgerOp(op DeferredBadgerUpdate)`. -// This method returns a self-reference for chaining. -func (d *DeferredDbOps) AddBadgerOp(op DeferredBadgerUpdate) *DeferredDbOps { - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - err = op(tx.DBTxn) - if err != nil { - return err - } - return nil - } - return d -} - -// AddBadgerOps schedules the given DeferredBadgerUpdates to be executed as part of the future transaction. -// This method returns a self-reference for chaining. -func (d *DeferredDbOps) AddBadgerOps(ops ...DeferredBadgerUpdate) *DeferredDbOps { - if len(ops) < 1 { - return d - } - - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - for _, op := range ops { - err = op(tx.DBTxn) - if err != nil { - return err - } - } - return nil - } - return d -} - -// AddDbOp schedules the given DeferredDBUpdate to be executed as part of the future transaction. -// For adding multiple DeferredBadgerUpdates, use `AddDbOps(ops ...DeferredDBUpdate)` if easily possible, as -// it reduces the call stack compared to adding the functors individually via `AddDbOp(op DeferredDBUpdate)`. 
-// This method returns a self-reference for chaining. -func (d *DeferredDbOps) AddDbOp(op DeferredDBUpdate) *DeferredDbOps { - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - err = op(tx) - if err != nil { - return err - } - return nil - } - return d -} - -// AddDbOps schedules the given DeferredDBUpdates to be executed as part of the future transaction. -// This method returns a self-reference for chaining. -func (d *DeferredDbOps) AddDbOps(ops ...DeferredDBUpdate) *DeferredDbOps { - if len(ops) < 1 { - return d - } - - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - for _, op := range ops { - err = op(tx) - if err != nil { - return err - } - } - return nil - } - return d -} - -// OnSucceed adds a callback to be executed after the deferred database operations have succeeded. For -// adding multiple callbacks, use `OnSucceeds(callbacks ...func())` if easily possible, as it reduces -// the call stack compared to adding the functors individually via `OnSucceed(callback func())`. -// This method returns a self-reference for chaining. -func (d *DeferredDbOps) OnSucceed(callback func()) *DeferredDbOps { - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - tx.OnSucceed(callback) - return nil - } - return d -} - -// OnSucceeds adds callbacks to be executed after the deferred database operations have succeeded. -// This method returns a self-reference for chaining. -func (d *DeferredDbOps) OnSucceeds(callbacks ...func()) *DeferredDbOps { - if len(callbacks) < 1 { - return d - } - - prior := d.pending - d.pending = func(tx *Tx) error { - err := prior(tx) - if err != nil { - return err - } - for _, c := range callbacks { - tx.OnSucceed(c) - } - return nil - } - return d -} diff --git a/storage/badger/transaction/deferred_update_test.go b/storage/badger/transaction/deferred_update_test.go deleted file mode 100644 index 18125fbdfb0..00000000000 --- a/storage/badger/transaction/deferred_update_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package transaction_test - -import ( - "fmt" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestEmpty verifies that DeferredDbOps behaves like a no-op if nothing is scheduled -func TestEmpty(t *testing.T) { - deferredDbOps := transaction.NewDeferredDbOps() - // deferredDbOps.Pending() should be a no-op and therefore not care that transaction.Tx is nil - err := deferredDbOps.Pending()(nil) - require.NoError(t, err) -} - -// TestAddBaderOp adds 1 or 2 DeferredBadgerUpdate(s) and verifies that they are executed in the expected order -func Test_AddBaderOp(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("single DeferredBadgerUpdate", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddBadgerOp(m.MakeBadgerUpdate()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("two DeferredBadgerUpdates added individually", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddBadgerOp(m.MakeBadgerUpdate()). 
- AddBadgerOp(m.MakeBadgerUpdate()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("two DeferredBadgerUpdates added as a sequence", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps() - deferredDbOps.AddBadgerOps( - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - }) -} - -// TestDbOp adds 1 or 2 DeferredDBUpdate(s) and verifies that they are executed in the expected order -func Test_AddDbOp(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("single DeferredDBUpdate without callback", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddDbOp(m.MakeDBUpdate(0)) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("single DeferredDBUpdate with one callback", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddDbOp(m.MakeDBUpdate(1)) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("single DeferredDBUpdate with multiple callbacks", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddDbOp(m.MakeDBUpdate(21)) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("two DeferredDBUpdates added individually", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - AddDbOp(m.MakeDBUpdate(17)). - AddDbOp(m.MakeDBUpdate(0)) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("two DeferredDBUpdates added as a sequence", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps() - deferredDbOps.AddDbOps( - m.MakeDBUpdate(0), - m.MakeDBUpdate(17)) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - }) -} - -// Test_AddOnSucceedCallback adds 1 or 2 callback(s) and verifies that they are executed in the expected order -func Test_AddOnSucceedCallback(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("single callback", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - OnSucceed(m.MakeCallback()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("two callbacks added individually", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - OnSucceed(m.MakeCallback()). - OnSucceed(m.MakeCallback()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - - t.Run("many callbacks added as a sequence", func(t *testing.T) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - OnSucceeds(m.MakeCallbacks(11)...) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) - }) -} - -// Test_EverythingMixed uses all ways to add functors in combination and verifies that they are executed in the expected order -func Test_EverythingMixed(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - m := NewCallMonitor(t) - deferredDbOps := transaction.NewDeferredDbOps(). - OnSucceed(m.MakeCallback()). - AddDbOp(m.MakeDBUpdate(1)). - AddBadgerOp(m.MakeBadgerUpdate()). - OnSucceeds(m.MakeCallbacks(3)...). 
- AddDbOp(m.MakeDBUpdate(0)). - AddBadgerOps( - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate(), - m.MakeBadgerUpdate()). - OnSucceeds( - m.MakeCallback(), - m.MakeCallback()). - AddDbOps( - m.MakeDBUpdate(7), - m.MakeDBUpdate(0), - m.MakeDBUpdate(1)). - OnSucceed(m.MakeCallback()) - err := transaction.Update(db, deferredDbOps.Pending()) - require.NoError(t, err) - }) -} - -/* ***************************************** Testing Utility CallMonitor ***************************************** */ - -// CallMonitor is a utility for testing that DeferredDbOps calls its input functors and callbacks -// in the correct order. DeferredDbOps is expected to proceed as follows: -// -// 0. Record functors added via `AddBadgerOp`, `AddDbOp`, `OnSucceed` ... -// 1. Execute the functors in the order they were added -// 2. During each functor's execution: -// - some functor's may schedule callbacks (depending on their type) -// - record those callbacks in the order they are scheduled (no execution yet) -// `OnSucceed` schedules its callback during its execution at this step as well -// 3. If and only if the underlying database transaction _successfully_ completed, run the callbacks -// -// To verify the correct order of calls, the CallMonitor generates functors. Each functor has a -// dedicated index value. When the functor is called, it checks that its index matches the functor index -// that the CallMonitor expects to be executed next. For callbacks, we proceed analogously. -// -// Usage note: -// The call CallMonitor assumes that functors are added to DeferredDbOps exactly in the order that -// CallMonitor generates them. This works very intuitively, when the tests proceed as in the following example: -// -// m := NewCallMonitor(t) -// deferredDbOps := transaction.NewDeferredDbOps() -// deferredDbOps.AddBadgerOp(m.MakeBadgerUpdate()) // here, we add the functor right when it is generated -// transaction.Update(db, deferredDbOps.Pending()) -type CallMonitor struct { - generatedTxFunctors int - generatedCallbacks int - - T *testing.T - nextExpectedTxFunctorIdx int - nextExpectedCallbackIdx int -} - -func NewCallMonitor(t *testing.T) *CallMonitor { - return &CallMonitor{T: t} -} - -func (cm *CallMonitor) MakeDBUpdate(withCallbacks int) transaction.DeferredDBUpdate { - myFunctorIdx := cm.generatedTxFunctors // copy into local scope. Determined when we construct functor - callbacks := cm.MakeCallbacks(withCallbacks) // pre-generate callback functors - functor := func(tx *transaction.Tx) error { - for _, c := range callbacks { - tx.OnSucceed(c) // schedule callback - } - if cm.nextExpectedTxFunctorIdx != myFunctorIdx { - // nextExpectedTxFunctorIdx holds the Index of the Functor that was generated next. DeferredDbOps - // should execute the functors in the order they were added, which is violated. Hence, we fail: - cm.T.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - return fmt.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - } - - // happy path: - cm.nextExpectedTxFunctorIdx += 1 - return nil - } - - cm.generatedTxFunctors += 1 - return functor -} - -func (cm *CallMonitor) MakeBadgerUpdate() transaction.DeferredBadgerUpdate { - myFunctorIdx := cm.generatedTxFunctors // copy into local scope. 
Determined when we construct functor - functor := func(tx *badger.Txn) error { - if cm.nextExpectedTxFunctorIdx != myFunctorIdx { - // nextExpectedTxFunctorIdx holds the Index of the Functor that was generated next. DeferredDbOps - // should execute the functors in the order they were added, which is violated. Hence, we fail: - cm.T.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - return fmt.Errorf("expected next Functor Index is %d but my value is %d", cm.nextExpectedTxFunctorIdx, myFunctorIdx) - } - - // happy path: - cm.nextExpectedTxFunctorIdx += 1 - return nil - } - - cm.generatedTxFunctors += 1 - return functor -} - -func (cm *CallMonitor) MakeCallback() func() { - myFunctorIdx := cm.generatedCallbacks // copy into local scope. Determined when we construct callback - functor := func() { - if cm.nextExpectedCallbackIdx != myFunctorIdx { - // nextExpectedCallbackIdx holds the Index of the callback that was generated next. DeferredDbOps - // should execute the callback in the order they were scheduled, which is violated. Hence, we fail: - cm.T.Errorf("expected next Callback Index is %d but my value is %d", cm.nextExpectedCallbackIdx, myFunctorIdx) - } - cm.nextExpectedCallbackIdx += 1 // happy path - } - - cm.generatedCallbacks += 1 - return functor -} - -func (cm *CallMonitor) MakeCallbacks(numberCallbacks int) []func() { - callbacks := make([]func(), 0, numberCallbacks) - for ; 0 < numberCallbacks; numberCallbacks-- { - callbacks = append(callbacks, cm.MakeCallback()) - } - return callbacks -} diff --git a/storage/badger/transactions.go b/storage/badger/transactions.go deleted file mode 100644 index eeca9c9477e..00000000000 --- a/storage/badger/transactions.go +++ /dev/null @@ -1,68 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Transactions ... -type Transactions struct { - db *badger.DB - cache *Cache[flow.Identifier, *flow.TransactionBody] -} - -// NewTransactions ... -func NewTransactions(cacheMetrics module.CacheMetrics, db *badger.DB) *Transactions { - store := func(txID flow.Identifier, flowTX *flow.TransactionBody) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertTransaction(txID, flowTX))) - } - - retrieve := func(txID flow.Identifier) func(tx *badger.Txn) (*flow.TransactionBody, error) { - return func(tx *badger.Txn) (*flow.TransactionBody, error) { - var flowTx flow.TransactionBody - err := operation.RetrieveTransaction(txID, &flowTx)(tx) - return &flowTx, err - } - } - - t := &Transactions{ - db: db, - cache: newCache[flow.Identifier, *flow.TransactionBody](cacheMetrics, metrics.ResourceTransaction, - withLimit[flow.Identifier, *flow.TransactionBody](flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return t -} - -// Store ... -func (t *Transactions) Store(flowTx *flow.TransactionBody) error { - return operation.RetryOnConflictTx(t.db, transaction.Update, t.storeTx(flowTx)) -} - -// ByID ... 
-func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) { - tx := t.db.NewTransaction(false) - defer tx.Discard() - return t.retrieveTx(txID)(tx) -} - -func (t *Transactions) storeTx(flowTx *flow.TransactionBody) func(*transaction.Tx) error { - return t.cache.PutTx(flowTx.ID(), flowTx) -} - -func (t *Transactions) retrieveTx(txID flow.Identifier) func(*badger.Txn) (*flow.TransactionBody, error) { - return func(tx *badger.Txn) (*flow.TransactionBody, error) { - val, err := t.cache.Get(txID)(tx) - if err != nil { - return nil, err - } - return val, err - } -} diff --git a/storage/badger/transactions_test.go b/storage/badger/transactions_test.go deleted file mode 100644 index 8d47e5d75af..00000000000 --- a/storage/badger/transactions_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestTransactionStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewTransactions(metrics, db) - - // store a transaction in db - expected := unittest.TransactionFixture() - err := store.Store(&expected.TransactionBody) - require.NoError(t, err) - - // retrieve the transaction by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, &expected.TransactionBody, actual) - - // re-insert the transaction - should be idempotent - err = store.Store(&expected.TransactionBody) - require.NoError(t, err) - }) -} - -func TestTransactionRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewTransactions(metrics, db) - - // attempt to get a invalid transaction - _, err := store.ByID(unittest.IdentifierFixture()) - assert.ErrorIs(t, err, storage.ErrNotFound) - }) -} diff --git a/storage/blocks.go b/storage/blocks.go index 829089ee761..a4e553bdd88 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -1,32 +1,52 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) // Blocks represents persistent storage for blocks. type Blocks interface { - // Store will atomically store a block with all its dependencies. - Store(block *flow.Block) error - - // StoreTx allows us to store a new block, including its payload & header, as part of a DB transaction, while - // still going through the caching layer. - StoreTx(block *flow.Block) func(*transaction.Tx) error + // BatchStore stores a valid block in a batch. + BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, block *flow.Block) error // ByID returns the block with the given hash. It is available for - // finalized and ambiguous blocks. + // finalized and pending blocks. + // Expected errors during normal operations: + // - storage.ErrNotFound if no block is found ByID(blockID flow.Identifier) (*flow.Block, error) - // ByHeight returns the block at the given height. It is only available - // for finalized blocks. + // ByHeight returns the block at the given height. It is only available for finalized blocks. 
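+	// An illustrative caller-side sketch (hypothetical variable names), showing how a caller
+	// might distinguish the benign not-found case from unexpected failures:
+	//
+	//	block, err := blocks.ByHeight(h)
+	//	if errors.Is(err, storage.ErrNotFound) {
+	//		// benign for callers tolerating lagging finalization: no finalized block at this height yet
+	//	} else if err != nil {
+	//		// unexpected failure
+	//	}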
+ // + // Expected errors during normal operations: + // - storage.ErrNotFound if no block is found for the given height ByHeight(height uint64) (*flow.Block, error) - // ByCollectionID returns the block for the given collection ID. + // ByView returns the block with the given view. It is only available for certified blocks. + // Certified blocks are the blocks that have received a QC. Hotstuff guarantees that for each view, + // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique + // even for non-finalized blocks. + // Expected errors during normal operations: + // - `storage.ErrNotFound` if no certified block is known at given view. + // + // TODO: this method is not available until next spork (mainnet27) or a migration that builds the index. + // ByView(view uint64) (*flow.Header, error) + + // ByCollectionID returns the *finalized** block that contains the collection with the given ID. + // + // Expected errors during normal operations: + // - storage.ErrNotFound if finalized block is known that contains the collection ByCollectionID(collID flow.Identifier) (*flow.Block, error) - // IndexBlockForCollections indexes the block each collection was - // included in. + // IndexBlockForCollections indexes the block each collection was included in. + // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation + // assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY + // *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). + // Hence, this function should be treated as a temporary solution, which requires generalization + // (one-to-many mapping) for soft finality and the mature protocol. + // + // No errors expected during normal operation. IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error } diff --git a/storage/cluster_blocks.go b/storage/cluster_blocks.go index ca5a3466b87..bbb1d5f9954 100644 --- a/storage/cluster_blocks.go +++ b/storage/cluster_blocks.go @@ -7,9 +7,6 @@ import ( type ClusterBlocks interface { - // Store stores the cluster block. - Store(block *cluster.Block) error - // ByID returns the block with the given ID. ByID(blockID flow.Identifier) (*cluster.Block, error) diff --git a/storage/cluster_payloads.go b/storage/cluster_payloads.go index 7d80d3a9e2e..5cf92194380 100644 --- a/storage/cluster_payloads.go +++ b/storage/cluster_payloads.go @@ -9,9 +9,6 @@ import ( // node cluster consensus. type ClusterPayloads interface { - // Store stores and indexes the given cluster payload. - Store(blockID flow.Identifier, payload *cluster.Payload) error - // ByBlockID returns the cluster payload for the given block ID. ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) } diff --git a/storage/collections.go b/storage/collections.go index 81379b0afd4..8e5cca0394b 100644 --- a/storage/collections.go +++ b/storage/collections.go @@ -1,6 +1,8 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) @@ -13,15 +15,16 @@ type CollectionsReader interface { // - `storage.ErrNotFound` if no light collection was found. ByID(collID flow.Identifier) (*flow.Collection, error) - // LightByID returns collection with the given ID. Only retrieves - // transaction hashes. + // LightByID returns a reduced representation of the collection with the given ID. 
+ // The reduced collection references the constituent transactions by their hashes. // // Expected errors during normal operation: // - `storage.ErrNotFound` if no light collection was found. LightByID(collID flow.Identifier) (*flow.LightCollection, error) - // LightByTransactionID returns the collection for the given transaction ID. Only retrieves - // transaction hashes. + // LightByTransactionID returns a reduced representation of the collection + // holding the given transaction ID. The reduced collection references the + // constituent transactions by their hashes. // // Expected errors during normal operation: // - `storage.ErrNotFound` if no light collection was found. @@ -34,26 +37,36 @@ type Collections interface { // Store inserts the collection keyed by ID and all constituent // transactions. + // This is used by execution node storing collections. // No errors are expected during normal operation. - Store(collection *flow.Collection) error + Store(collection *flow.Collection) (flow.LightCollection, error) // Remove removes the collection and all constituent transactions. // No errors are expected during normal operation. Remove(collID flow.Identifier) error - // StoreLightAndIndexByTransaction inserts the light collection (only - // transaction IDs) and adds a transaction id index for each of the - // transactions within the collection (transaction_id->collection_id). + // StoreAndIndexByTransaction stores the collection and indexes it by transaction. + // This is used by access node storing collections for finalized blocks. // - // NOTE: Currently it is possible in rare circumstances for two collections - // to be guaranteed which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/3556). - // The second of these will revert upon reaching the execution node, so - // this doesn't impact the execution state, but it can result in the Access - // node processing two collections which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/5337). - // To handle this, we skip indexing the affected transaction when inserting - // the transaction_id->collection_id index when an index for the transaction - // already exists. + // CAUTION: current approach is NOT BFT and needs to be revised in the future. + // Honest clusters ensure a transaction can only belong to one collection. However, in rare + // cases, the collector clusters can exceed byzantine thresholds -- making it possible to + // produce multiple finalized collections (aka guaranteed collections) containing the same + // transaction repeatedly. + // TODO: eventually we need to handle Byzantine clusters // // No errors are expected during normal operation. - StoreLightAndIndexByTransaction(collection *flow.LightCollection) error + StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (flow.LightCollection, error) + + // BatchStoreAndIndexByTransaction stores the collection and indexes it by transaction within a batch. + // + // CAUTION: current approach is NOT BFT and needs to be revised in the future. + // Honest clusters ensure a transaction can only belong to one collection. However, in rare + // cases, the collector clusters can exceed byzantine thresholds -- making it possible to + // produce multiple finalized collections (aka guaranteed collections) containing the same + // transaction repeatedly. 
+ // TODO: eventually we need to handle Byzantine clusters + // + // This is used by access node storing collections for finalized blocks + BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, batch ReaderBatchWriter) (flow.LightCollection, error) } diff --git a/storage/commits.go b/storage/commits.go index 1c8c0a25d3c..0adbfecf3e1 100644 --- a/storage/commits.go +++ b/storage/commits.go @@ -1,6 +1,8 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) @@ -13,16 +15,13 @@ type CommitsReader interface { type Commits interface { CommitsReader - // Store will store a commit in the persistent storage. - Store(blockID flow.Identifier, commit flow.StateCommitment) error - // BatchStore stores Commit keyed by blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch ReaderBatchWriter) error + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes Commit keyed by blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } diff --git a/storage/consumer_progress.go b/storage/consumer_progress.go index 9655bd8a95c..0a3d6b327f6 100644 --- a/storage/consumer_progress.go +++ b/storage/consumer_progress.go @@ -13,10 +13,17 @@ type ConsumerProgressInitializer interface { // It must be created by the ConsumerProgressInitializer, so that it can guarantee // the ProcessedIndex and SetProcessedIndex methods are safe to use. type ConsumerProgress interface { - // read the current processed index - // any error returned are exceptions + // ProcessedIndex returns the processed index for the consumer + // No errors are expected during normal operation ProcessedIndex() (uint64, error) - // update the processed index in the storage layer. 
- // any error returned are exceptions + + // SetProcessedIndex updates the processed index for the consumer + // The caller must use ConsumerProgressInitializer to initialize the progress index in storage + // No errors are expected during normal operation SetProcessedIndex(processed uint64) error + + // BatchSetProcessedIndex updates the processed index for the consumer within in provided batch + // The caller must use ConsumerProgressInitializer to initialize the progress index in storage + // No errors are expected during normal operation + BatchSetProcessedIndex(processed uint64, batch ReaderBatchWriter) error } diff --git a/storage/deferred/operations.go b/storage/deferred/operations.go new file mode 100644 index 00000000000..ed13cae839e --- /dev/null +++ b/storage/deferred/operations.go @@ -0,0 +1,101 @@ +package deferred + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// DBOp is a shorthand for a deferred database operation that works within a lock-protected context. +// It accepts a lock proof, a block ID, and a reader/writer interface to perform its task. +// This pattern allows chaining database updates for atomic execution in one batch updates. +type DBOp = func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error + +// DeferredBlockPersist accumulates deferred database operations to be executed later in a single atomic batch update. +// Specifically, we defer appending writes and success-callbacks to a [storage.ReaderBatchWriter]. +// Operations for appending writes and success-callbacks are executed in the order in which they were queued. +// Since Pebble does not provide serializable snapshot isolation, callers MUST ensure that the necessary locks are +// acquired before executing the set of deferred operations. +// +// This construct accomplishes two distinct goals: +// 1. Deferring block indexing write operations when the block ID is not yet known. +// 2. Deferring lock-requiring read-then-write operations to minimize time spent holding a lock. +// +// NOT CONCURRENCY SAFE +type DeferredBlockPersist struct { + pending DBOp // Holds the accumulated operations as a single composed function. Can be nil if no ops are added. +} + +// NewDeferredBlockPersist instantiates a DeferredBlockPersist instance. Initially, it behaves as a no-op until operations are added. +func NewDeferredBlockPersist() *DeferredBlockPersist { + return &DeferredBlockPersist{ + pending: nil, + } +} + +// IsEmpty returns true if no operations have been enqueued. +func (d *DeferredBlockPersist) IsEmpty() bool { + return d.pending == nil +} + +// AddNextOperation adds a new deferred database operation to the queue of pending operations. +// If there are already pending operations, this new operation will be composed to run after them. +// This method ensures the operations execute sequentially and abort on the first error. +// +// If `nil` is passed, it is ignored — this might happen if chaining with an empty DeferredBlockPersist. +func (d *DeferredBlockPersist) AddNextOperation(nextOperation DBOp) { + if nextOperation == nil { + // No-op if the provided operation is nil. + return + } + + if d.pending == nil { + // If this is the first operation being added, set it directly. + d.pending = nextOperation + return + } + + // Compose the prior and next operations into a single function. 
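+	// Each addition wraps the existing composite in a new closure: `prior` captures everything queued
+	// so far and runs before `nextOperation`. Repeated additions therefore build a chain that executes
+	// in insertion order and stops at the first error.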
+ prior := d.pending + d.pending = func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // Execute the prior operations first. + if err := prior(lctx, blockID, rw); err != nil { + return err + } + // Execute the newly added operation next. + if err := nextOperation(lctx, blockID, rw); err != nil { + return err + } + return nil + } +} + +// Chain merges the deferred operations from another DeferredBlockPersist into this one. +// The resulting order of operations is: +// 1. execute the operations in the receiver in the order they were added +// 2. execute the operations from the input in the order they were added +func (d *DeferredBlockPersist) Chain(deferred *DeferredBlockPersist) { + d.AddNextOperation(deferred.pending) +} + +// AddSucceedCallback adds a callback to be executed **after** the pending database operations succeed. +// This is useful for registering indexing tasks or post-commit hooks. +// The callback is only invoked if no error occurred during batch updates execution. +func (d *DeferredBlockPersist) AddSucceedCallback(callback func()) { + d.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // Schedule the callback to run after a successful commit. + storage.OnCommitSucceed(rw, callback) + return nil + }) +} + +// Execute runs all the accumulated deferred database operations in-order. +// If no operations were added, it is effectively a no-op. +// This method should be called exactly once per batch updates context. +func (d *DeferredBlockPersist) Execute(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + if d.pending == nil { + return nil // No operations to execute. + } + return d.pending(lctx, blockID, rw) +} diff --git a/storage/deferred/operations_test.go b/storage/deferred/operations_test.go new file mode 100644 index 00000000000..a2f87469d5b --- /dev/null +++ b/storage/deferred/operations_test.go @@ -0,0 +1,300 @@ +package deferred_test + +import ( + "errors" + "fmt" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/dbtest" +) + +// TestNewDeferredBlockPersist verifies that a newly created DeferredBlockPersist instance is empty and not nil. +func TestNewDeferredBlockPersist(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + assert.NotNil(t, d) + assert.True(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_IsEmpty verifies the working of `DeferredBlockPersist.IsEmpty` method. +func TestDeferredBlockPersist_IsEmpty(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + assert.True(t, d.IsEmpty()) + + d.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return nil + }) + assert.False(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Nil verifies that adding a nil operation does +// not change the state of the DeferredBlockPersist. 
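+// This mirrors the explicit nil check in AddNextOperation, so chaining with an empty
+// DeferredBlockPersist (via Chain) remains a cheap no-op.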
+func TestDeferredBlockPersist_AddNextOperation_Nil(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + d.AddNextOperation(nil) + assert.True(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_Execute_NoOps verifies that executing an empty DeferredBlockPersist is a no-op. +func TestDeferredBlockPersist_Execute_NoOps(t *testing.T) { + rw := mock.NewReaderBatchWriter(t) // mock errors on any function call + d := deferred.NewDeferredBlockPersist() + err := d.Execute(nil, flow.Identifier{}, rw) + assert.NoError(t, err) +} + +// TestDeferredBlockPersist_AddNextOperation_Single verifies that a single operation can be added and executed. +func TestDeferredBlockPersist_AddNextOperation_Single(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var executed bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executed = true + return nil + } + + d.AddNextOperation(op) + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.NoError(t, err) + assert.True(t, executed) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Multiple verifies that: +// - multiple operations can be added +// - operations are executed in the order they were added +func TestDeferredBlockPersist_AddNextOperation_Multiple(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var executionOrder []int + + op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 1) + return nil + } + op2 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 2) + return nil + } + + d.AddNextOperation(op1) + d.AddNextOperation(op2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.NoError(t, err) + assert.Equal(t, []int{1, 2}, executionOrder) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Error verifies that if an operation returns an error, +// subsequent operations are not executed and the error is returned. 
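+// This relies on the composed closure aborting at the first error, so operations queued after the
+// failing one are never invoked.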
+func TestDeferredBlockPersist_AddNextOperation_Error(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var op2Executed bool + testErr := errors.New("test error") + + op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return fmt.Errorf("aborting: %w", testErr) + } + op2 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + op2Executed = true + return nil + } + + d.AddNextOperation(op1) + d.AddNextOperation(op2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.Error(t, err) + assert.ErrorIs(t, err, testErr) + assert.False(t, op2Executed) + }) +} + +// TestDeferredBlockPersist_Chain verifies that chaining two DeferredBlockPersist: +// - executes all operations from both instances +// - maintains the order of operations (first operations from receiver, then from chained instance) +func TestDeferredBlockPersist_Chain(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var executionOrder []int + + d1 := deferred.NewDeferredBlockPersist() + d1op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 1) + return nil + } + d1.AddNextOperation(d1op1) + + d2 := deferred.NewDeferredBlockPersist() + d2op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 2) + return nil + } + d2.AddNextOperation(d2op1) + + d1.Chain(d2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d1.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.Equal(t, []int{1, 2}, executionOrder) + }) +} + +// TestDeferredBlockPersist_Chain_Empty verifies that chaining involving an empty DeferredBlockPersist works +func TestDeferredBlockPersist_Chain_Empty(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("non-empty receiver chaining an empty DeferredBlockPersist", func(t *testing.T) { + d := deferred.NewDeferredBlockPersist() + var opExecuted bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + opExecuted = true + return nil + } + d.AddNextOperation(op) + + empty := deferred.NewDeferredBlockPersist() + d.Chain(empty) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.True(t, opExecuted) + }) + + t.Run("empty receiver chaining an non-empty DeferredBlockPersist", func(t *testing.T) { + d := deferred.NewDeferredBlockPersist() + var opExecuted bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + opExecuted = true + return nil + } + d.AddNextOperation(op) + + empty := deferred.NewDeferredBlockPersist() + empty.Chain(d) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return empty.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.True(t, opExecuted) + }) + }) +} + +// TestDeferredBlockPersist_AddSucceedCallback verifies that a callback is executed when commiting the `ReaderBatchWriter` +func TestDeferredBlockPersist_AddSucceedCallback(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := 
deferred.NewDeferredBlockPersist()
+		var callbackExecuted bool
+		callback := func() {
+			callbackExecuted = true
+		}
+		d.AddSucceedCallback(callback)
+
+		err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error {
+			// Upon running the deferred operations, the callback should be registered with the writer. However, the
+			// callback should not be executed yet, as the writer will only be committed once we return from this function.
+			err := d.Execute(nil, flow.Identifier{}, writer)
+			require.NoError(t, err)
+			assert.False(t, callbackExecuted)
+			return nil
+		}) // WithReaderBatchWriter commits the batch at the end, which should have triggered the callback
+		require.NoError(t, err)
+		assert.True(t, callbackExecuted)
+	})
+}
+
+// TestDeferredBlockPersist_AddSucceedCallback_Error verifies that if an error occurs when committing the batch,
+// the success callback is not executed.
+func TestDeferredBlockPersist_AddSucceedCallback_Error(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		d := deferred.NewDeferredBlockPersist()
+		var callbackExecuted bool
+		callback := func() {
+			callbackExecuted = true
+		}
+		d.AddSucceedCallback(callback)
+
+		testErr := errors.New("test error")
+		err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error {
+			// Execute the deferred operation, which registers the success callback with the writer. However, the
+			// callback should not be executed yet, as the writer will only be committed once we return from this function.
+			err := d.Execute(nil, flow.Identifier{}, writer)
+			require.NoError(t, err)
+			assert.False(t, callbackExecuted)
+
+			// Return an error from the transaction block to simulate a failed transaction.
+			return fmt.Errorf("abort: %w", testErr)
+		}) // since we returned an error, the batch is not successfully committed, so the callback must not fire
+
+		// The error from the transaction should be the one we returned.
+		require.Error(t, err)
+		assert.ErrorIs(t, err, testErr)
+
+		// Because the transaction failed, the success callback should not have been executed.
+		assert.False(t, callbackExecuted)
+	})
+}
+
+// TestDeferredBlockPersist_Add_Operation_and_Callback verifies that a deferred operation and a success
+// callback can be added together: the operation runs when the batch is composed, while the callback
+// fires only after the batch has been committed.
+func TestDeferredBlockPersist_Add_Operation_and_Callback(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		d := deferred.NewDeferredBlockPersist()
+		var opExecuted bool
+		var callbackExecuted bool
+
+		op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
+			opExecuted = true
+			return nil
+		}
+		callback := func() {
+			callbackExecuted = true
+		}
+
+		d.AddNextOperation(op)
+		d.AddSucceedCallback(callback)
+
+		err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error {
+			// When composing the final write batch, the deferred operations should be run and the callback should
+			// be registered with the writer. However, the callback should not be executed yet, as the writer will
+			// only be committed once we return from this function.
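+			// The deferred operation itself, in contrast, runs immediately as part of composing the batch.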
+ err := d.Execute(nil, flow.Identifier{}, writer) + require.NoError(t, err) + assert.True(t, opExecuted) + assert.False(t, callbackExecuted) + return nil + }) // WithReaderBatchWriter commits the batch at the end, which should have triggered the callback + + require.NoError(t, err) + assert.True(t, opExecuted) + assert.True(t, callbackExecuted) + }) +} diff --git a/storage/epoch_commits.go b/storage/epoch_commits.go index 5dffa581a3a..b555aa70e5a 100644 --- a/storage/epoch_commits.go +++ b/storage/epoch_commits.go @@ -2,13 +2,13 @@ package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) type EpochCommits interface { - // StoreTx allows us to store a new epoch commit in a DB transaction while updating the cache. - StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error + // BatchStore allows us to store a new epoch commit in a DB batch update while updating the cache. + // No errors are expected during normal operation. + BatchStore(rw ReaderBatchWriter, commit *flow.EpochCommit) error // ByID will return the EpochCommit event by its ID. // Error returns: diff --git a/storage/epoch_protocol_state.go b/storage/epoch_protocol_state.go index a26264e025b..2e771772381 100644 --- a/storage/epoch_protocol_state.go +++ b/storage/epoch_protocol_state.go @@ -2,24 +2,20 @@ package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) // EpochProtocolStateEntries represents persistent, fork-aware storage for the Epoch-related // sub-state of the overall of the overall Protocol State (KV Store). type EpochProtocolStateEntries interface { - // StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), - // which persists the given epoch sub-state as part of a DB tx. Per convention, the identities in - // the Protocol State must be in canonical order for the current and next epoch (if present), + // BatchStore persists the given epoch protocol state entry as part of a DB batch. Per convention, the identities in + // the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), // otherwise an exception is returned. - // Expected errors of the returned anonymous function: - // - storage.ErrAlreadyExists if an epoch sub-state with the given id is already stored - StoreTx(epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) func(*transaction.Tx) error + // No errors are expected during normal operation. + BatchStore(w Writer, epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) error - // Index returns an anonymous function that is intended to be executed as part of a database transaction. - // In a nutshell, we want to maintain a map from `blockID` to `epochProtocolStateID`, where `blockID` references the - // block that _proposes_ the epoch sub-state. - // Upon call, the anonymous function persists the specific map entry in the node's database. + // BatchIndex persists the specific map entry in the node's database. + // In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the + // block that _proposes_ the referenced epoch protocol state entry. // Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, // the protocol state changes if we seal some execution results emitting service events. 
@@ -28,9 +24,8 @@ type EpochProtocolStateEntries interface { // - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, // _after_ validating the QC. // - // Expected errors during normal operations: - // - storage.ErrAlreadyExists if a epoch sub-state for the given blockID has already been indexed - Index(blockID flow.Identifier, epochProtocolStateID flow.Identifier) func(*transaction.Tx) error + // No errors are expected during normal operation. + BatchIndex(rw ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateID flow.Identifier) error // ByID returns the flow.RichEpochStateEntry by its ID. // Expected errors during normal operations: diff --git a/storage/epoch_setups.go b/storage/epoch_setups.go index 2cb88f8c2cc..6df0b4364cf 100644 --- a/storage/epoch_setups.go +++ b/storage/epoch_setups.go @@ -2,13 +2,13 @@ package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) type EpochSetups interface { - // StoreTx allows us to store a new epoch setup in a DB transaction while going through the cache. - StoreTx(*flow.EpochSetup) func(*transaction.Tx) error + // BatchStore allows us to store a new epoch setup in a DB batch update while going through the cache. + // No errors are expected during normal operation. + BatchStore(rw ReaderBatchWriter, setup *flow.EpochSetup) error // ByID will return the EpochSetup event by its ID. // Error returns: diff --git a/storage/guarantees.go b/storage/guarantees.go index 22804f22808..dae60367145 100644 --- a/storage/guarantees.go +++ b/storage/guarantees.go @@ -7,9 +7,6 @@ import ( // Guarantees represents persistent storage for collection guarantees. type Guarantees interface { - // Store inserts the collection guarantee. - Store(guarantee *flow.CollectionGuarantee) error - // ByCollectionID retrieves the collection guarantee by collection ID. ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) } diff --git a/storage/headers.go b/storage/headers.go index 45e2f7b4a22..dd3592c5faa 100644 --- a/storage/headers.go +++ b/storage/headers.go @@ -7,9 +7,6 @@ import ( // Headers represents persistent storage for blocks. type Headers interface { - // Store will store a header. - Store(header *flow.Header) error - // ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks. // Error returns: // - ErrNotFound if no block header with the given ID exists @@ -18,6 +15,16 @@ type Headers interface { // ByHeight returns the block with the given number. It is only available for finalized blocks. ByHeight(height uint64) (*flow.Header, error) + // ByView returns the block with the given view. It is only available for certified blocks. + // Certified blocks are the blocks that have received QC. Hotstuff guarantees that for each view, + // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique + // even for non-finalized blocks. + // Expected errors during normal operations: + // - `storage.ErrNotFound` if no certified block is known at given view. + // + // TODO: this method is not available until next spork (mainnet27) or a migration that builds the index. + // ByView(view uint64) (*flow.Header, error) + // Exists returns true if a header with the given ID has been stored. // No errors are expected during normal operation. 
Exists(blockID flow.Identifier) (bool, error) diff --git a/storage/index.go b/storage/index.go index a6e815c6c1f..377cd25e68a 100644 --- a/storage/index.go +++ b/storage/index.go @@ -6,9 +6,6 @@ import ( type Index interface { - // Store stores the index for a block payload. - Store(blockID flow.Identifier, index *flow.Index) error - // ByBlockID retrieves the index for a block payload. // Error returns: // - ErrNotFound if no block header with the given ID exists diff --git a/storage/latest_persisted_sealed_result.go b/storage/latest_persisted_sealed_result.go new file mode 100644 index 00000000000..5a49040cf31 --- /dev/null +++ b/storage/latest_persisted_sealed_result.go @@ -0,0 +1,17 @@ +package storage + +import "github.com/onflow/flow-go/model/flow" + +// LatestPersistedSealedResult tracks the most recently persisted sealed execution result processed +// by the Access ingestion engine. +type LatestPersistedSealedResult interface { + // Latest returns the ID and height of the latest persisted sealed result. + Latest() (flow.Identifier, uint64) + + // BatchSet updates the latest persisted sealed result in a batch operation + // The resultID and height are added to the provided batch, and the local data is updated only after + // the batch is successfully committed. + // + // No errors are expected during normal operation, + BatchSet(resultID flow.Identifier, height uint64, batch ReaderBatchWriter) error +} diff --git a/storage/locks.go b/storage/locks.go new file mode 100644 index 00000000000..2a3dcb42d09 --- /dev/null +++ b/storage/locks.go @@ -0,0 +1,108 @@ +package storage + +import ( + "sync" + + "github.com/jordanschalm/lockctx" +) + +// This file enumerates all named locks used by the storage layer. + +const ( + // LockInsertBlock protects the entire block insertion process (`ParticipantState.Extend` or `FollowerState.ExtendCertified`) + LockInsertBlock = "lock_insert_block" + // LockFinalizeBlock protects the entire block finalization process (`FollowerState.Finalize`) + LockFinalizeBlock = "lock_finalize_block" + // LockIndexResultApproval protects indexing result approvals by approval and chunk. + LockIndexResultApproval = "lock_index_result_approval" + // LockInsertOrFinalizeClusterBlock protects the entire cluster block insertion or finalization process. + // The reason they are combined is because insertion process reads some data updated by finalization process, + // in order to prevent dirty reads, we need to acquire the lock for both operations. + LockInsertOrFinalizeClusterBlock = "lock_insert_or_finalize_cluster_block" + // LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block. + // Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`. + LockInsertOwnReceipt = "lock_insert_own_receipt" + // LockInsertCollection protects the insertion of collections. + LockInsertCollection = "lock_insert_collection" +) + +// Locks returns a list of all named locks used by the storage layer. +func Locks() []string { + return []string{ + LockInsertBlock, + LockFinalizeBlock, + LockIndexResultApproval, + LockInsertOrFinalizeClusterBlock, + LockInsertOwnReceipt, + LockInsertCollection, + } +} + +type LockManager = lockctx.Manager + +// makeLockPolicy constructs the policy used by the storage layer to prevent deadlocks. +// We use a policy defined by a directed acyclic graph, where vertices represent named locks. 
+// A directed edge between two vertices A, B means: I can acquire B next after acquiring A.
+// When no edges are added, each lock context may acquire at most one lock.
+//
+// For example, the bootstrapping logic both inserts and finalizes blocks. So it needs to
+// acquire both LockInsertBlock and LockFinalizeBlock. To allow this, we add the directed
+// edge LockInsertBlock -> LockFinalizeBlock with `Add(LockInsertBlock, LockFinalizeBlock)`.
+// This means:
+// - a context can acquire either LockInsertBlock or LockFinalizeBlock first (always true)
+// - a context holding LockInsertBlock can acquire LockFinalizeBlock next (allowed by the edge)
+// - a context holding LockFinalizeBlock cannot acquire LockInsertBlock next (disallowed, because the edge is directed)
+//
+// This function will panic if a policy is created which does not prevent deadlocks.
+func makeLockPolicy() lockctx.Policy {
+	return lockctx.NewDAGPolicyBuilder().
+		Add(LockInsertBlock, LockFinalizeBlock).
+		Build()
+}
+
+var makeLockManagerOnce sync.Once
+
+// MakeSingletonLockManager returns the lock manager used by the storage layer.
+// This function must be used for production builds and must be called exactly once process-wide.
+//
+// The Lock Manager is a core component enforcing atomicity of various storage operations across different
+// components. Therefore, the lock manager is a singleton instance, as the storage layer's atomicity and
+// consistency depend on the same set of locks being used everywhere.
+// By convention, the lock manager singleton is injected into the node's components during their
+// initialization, following the same dependency-injection pattern as other components that are conceptually
+// singletons (e.g. the storage layer abstractions). Thereby, we explicitly codify in the constructor that a
+// component uses the lock manager. We think it is helpful to emphasize that the component at times
+// will acquire _exclusive access_ to all key-value pairs in the database whose keys start with some specific
+// prefixes (see `storage/badger/operation/prefix.go` for an exhaustive list of prefixes).
+// In comparison, the alternative pattern (which we do not use) of retrieving a singleton instance via a
+// global variable would hide which components require exclusive storage access, and in addition, it would
+// break with our broadly established dependency-injection pattern. To enforce best practices, this function
+// will panic if it is called more than once.
+//
+// CAUTION:
+//   - The lock manager only guarantees atomicity of reads and writes for the thread holding the lock.
+//     Other threads can continue to read (possibly stale) values, while the lock is held by a different thread.
+//   - Furthermore, the writer must bundle all their writes into a _single_ Write Batch for atomicity. Even
+//     when holding the lock, reading threads can still observe the writes of one batch while not observing
+//     the writes of a second batch, despite the thread writing both batches while holding the lock. It was
+//     a deliberate choice for the sake of performance to allow reads without any locking - so instead of
+//     waiting for the newest value in case a write is currently ongoing, the reader will just retrieve the
+//     previous value. This aligns with our architecture of the node operating as an eventually-consistent
+//     system, which favors loose coupling and high throughput for different components within a node.
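+//
+// Illustrative note (a sketch of intended usage, not prescriptive wiring): a component receives the
+// manager via its constructor and opens a lock context per storage operation. Under the policy above,
+// such a context may acquire LockInsertBlock and subsequently LockFinalizeBlock, while acquiring them
+// in the reverse order is rejected by the policy. This ordering discipline is what rules out deadlocks
+// between concurrently operating lock contexts.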
+func MakeSingletonLockManager() lockctx.Manager { + var manager lockctx.Manager + makeLockManagerOnce.Do(func() { + manager = lockctx.NewManager(Locks(), makeLockPolicy()) + }) + if manager == nil { + panic("critical sanity check failed: MakeSingletonLockManager invoked more than once") + } + return manager +} + +// NewTestingLockManager returns the lock manager used by the storage layer. +// This function must be used for testing only but NOT for PRODUCTION builds. +// Unlike MakeSingletonLockManager, this function may be called multiple times. +func NewTestingLockManager() lockctx.Manager { + return lockctx.NewManager(Locks(), makeLockPolicy()) +} diff --git a/storage/migration/migration.go b/storage/migration/migration.go new file mode 100644 index 00000000000..4df596d81ff --- /dev/null +++ b/storage/migration/migration.go @@ -0,0 +1,373 @@ +package migration + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + + "github.com/rs/zerolog/log" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/storage" +) + +type MigrationConfig struct { + PebbleDir string + BatchByteSize int // the size of each batch to write to pebble + ReaderWorkerCount int // the number of workers to read from badger + WriterWorkerCount int // the number of workers to write to the pebble + + // number of prefix bytes used to assign iterator workload + // e.g, if the number is 1, it means the first byte of the key is used to divide into 256 key space, + // and each worker will be assigned to iterate all keys with the same first byte. + // Since keys are not evenly distributed, especially some table under a certain prefix byte may have + // a lot more data than others, we might choose to use 2 or 3 bytes to divide the key space, so that + // the redaer worker can concurrently iterate keys with the same prefix bytes (same table). + ReaderShardPrefixBytes int + + // ValidationMode determines how thorough the validation should be + // - PartialValidation: only checks min/max keys for each prefix (faster) + // - FullValidation: checks all keys in the database (more thorough) + ValidationMode ValidationMode + + ValidationOnly bool // if true, only validate the data in the badger db without copying it to pebble db +} + +type KVPairs struct { + Prefix []byte + Pairs []KVPair +} + +type KVPair struct { + Key []byte + Value []byte +} + +func GeneratePrefixes(n int) [][]byte { + if n == 0 { + return [][]byte{{}} + } + + base := 1 << (8 * n) + results := make([][]byte, 0, base) + + for i := 0; i < base; i++ { + buf := make([]byte, n) + switch n { + case 1: + buf[0] = byte(i) + case 2: + binary.BigEndian.PutUint16(buf, uint16(i)) + case 3: + buf[0] = byte(i >> 16) + buf[1] = byte(i >> 8) + buf[2] = byte(i) + default: + panic("unsupported prefix byte length") + } + results = append(results, buf) + } + return results +} + +func GenerateKeysShorterThanPrefix(n int) [][]byte { + allKeys := make([][]byte, 0) + for i := 1; i < n; i++ { + keys := GeneratePrefixes(i) + allKeys = append(allKeys, keys...) + } + return allKeys +} + +// readerWorker reads key-value pairs from BadgerDB using a prefix iterator. 
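+// Each job received on `jobs` is a single key prefix. The worker iterates all Badger keys under that
+// prefix and streams them to `kvChan` in batches of roughly `batchSize` bytes, reporting progress via
+// `lgProgress` once per completed prefix.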
+func readerWorker( + ctx context.Context, + lgProgress func(int), + db *badger.DB, + jobs <-chan []byte, // each job is a prefix to iterate over + kvChan chan<- KVPairs, // channel to send key-value pairs to writer workers + batchSize int, +) error { + for prefix := range jobs { + err := db.View(func(txn *badger.Txn) error { + if ctx.Err() != nil { + return ctx.Err() + } + + options := badger.DefaultIteratorOptions + options.Prefix = prefix + it := txn.NewIterator(options) + defer it.Close() + + var ( + kvBatch []KVPair + currSize int + ) + + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + + item := it.Item() + key := item.KeyCopy(nil) + val, err := item.ValueCopy(nil) + if err != nil { + return err + } + + kvBatch = append(kvBatch, KVPair{Key: key, Value: val}) + currSize += len(key) + len(val) + + if currSize >= batchSize { + select { + case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}: + case <-ctx.Done(): + return ctx.Err() + } + kvBatch = nil + currSize = 0 + } + } + + if len(kvBatch) > 0 { + select { + case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}: + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil + }) + + lgProgress(1) + + if err != nil { + return err + } + } + return nil +} + +func pebbleReaderWorker( + ctx context.Context, + lgProgress func(int), + db *pebble.DB, + jobs <-chan []byte, // each job is a prefix to iterate over + kvChan chan<- KVPairs, // channel to send key-value pairs to writer workers + batchSize int, +) error { + for prefix := range jobs { + if ctx.Err() != nil { + return ctx.Err() + } + + lowerBound, upperBound, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(prefix, prefix) + options := pebble.IterOptions{ + LowerBound: lowerBound, + UpperBound: upperBound, + } + + if !hasUpperBound { + options.UpperBound = nil + } + + iter, err := db.NewIter(&options) + if err != nil { + return fmt.Errorf("failed to create iterator: %w", err) + } + defer iter.Close() + + var ( + kvBatch []KVPair + currSize int + ) + + for iter.First(); iter.Valid(); iter.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + + key := iter.Key() + value := iter.Value() + + // Only process keys that start with our prefix + if !bytes.HasPrefix(key, prefix) { + break + } + + kvBatch = append(kvBatch, KVPair{ + Key: append([]byte(nil), key...), + Value: append([]byte(nil), value...), + }) + currSize += len(key) + len(value) + + if currSize >= batchSize { + select { + case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}: + case <-ctx.Done(): + return ctx.Err() + } + kvBatch = nil + currSize = 0 + } + } + + if len(kvBatch) > 0 { + select { + case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}: + case <-ctx.Done(): + return ctx.Err() + } + } + + lgProgress(1) + } + return nil +} + +// writerWorker writes key-value pairs to PebbleDB in batches. +func writerWorker(ctx context.Context, db *pebble.DB, kvChan <-chan KVPairs) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case kvGroup, ok := <-kvChan: + if !ok { + return nil + } + batch := db.NewBatch() + for _, kv := range kvGroup.Pairs { + if err := batch.Set(kv.Key, kv.Value, nil); err != nil { + return fmt.Errorf("fail to set key %x: %w", kv.Key, err) + } + } + + if err := batch.Commit(nil); err != nil { + return fmt.Errorf("fail to commit batch: %w", err) + } + } + } +} + +// CopyFromBadgerToPebble migrates all key-value pairs from a BadgerDB instance to a PebbleDB instance. 
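+// (Note: RunMigration in runner.go uses the SSTable-ingestion variant CopyFromBadgerToPebbleSSTables;
+// this function writes through regular Pebble batches instead.)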
+// +// The migration is performed in parallel using a configurable number of reader and writer workers. +// Reader workers iterate over the BadgerDB by sharded key prefixes (based on ReaderShardPrefixBytes) +// and send key-value pairs to a shared channel. Writer workers consume from this channel and write +// batched entries into PebbleDB. +// +// Configuration is provided via MigrationConfig: +// - BatchByteSize: maximum size in bytes for a single Pebble write batch. +// - ReaderWorkerCount: number of concurrent workers reading from Badger. +// - WriterWorkerCount: number of concurrent workers writing to Pebble. +// - ReaderShardPrefixBytes: number of bytes used to shard the keyspace for parallel iteration. +// +// The function blocks until all keys are migrated and written successfully. +// It returns an error if any part of the process fails. +func CopyFromBadgerToPebble(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error { + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(nil) + + // Step 1: Copy all keys shorter than prefix + keysShorterThanPrefix := GenerateKeysShorterThanPrefix(cfg.ReaderShardPrefixBytes) + keyCount, err := copyExactKeysFromBadgerToPebble(badgerDB, pebbleDB, keysShorterThanPrefix) + if err != nil { + return fmt.Errorf("failed to copy keys shorter than prefix: %w", err) + } + log.Info().Msgf("Copied %d keys shorter than %v bytes prefix", keyCount, cfg.ReaderShardPrefixBytes) + + // Step 2: Copy all keys with prefix by first generating prefix shards and then + // using reader and writer workers to copy the keys with the same prefix + prefixes := GeneratePrefixes(cfg.ReaderShardPrefixBytes) + prefixJobs := make(chan []byte, len(prefixes)) + for _, prefix := range prefixes { + prefixJobs <- prefix + } + close(prefixJobs) + + kvChan := make(chan KVPairs, cfg.ReaderWorkerCount*2) + + lg := util.LogProgress( + log.Logger, + util.DefaultLogProgressConfig("migration keys from badger to pebble", len(prefixes)), + ) + + g, ctx := errgroup.WithContext(ctx) + + // Spawn reader workers + for i := 0; i < cfg.ReaderWorkerCount; i++ { + g.Go(func() error { + return readerWorker(ctx, lg, badgerDB, prefixJobs, kvChan, cfg.BatchByteSize) + }) + } + + // Spawn writer workers + for i := 0; i < cfg.WriterWorkerCount; i++ { + g.Go(func() error { + return writerWorker(ctx, pebbleDB, kvChan) + }) + } + + // Close kvChan after readers complete + go func() { + // Wait for all reader workers to complete + if err := g.Wait(); err != nil { + cancel(err) + } + close(kvChan) + }() + + // Wait for all workers to complete + if err := g.Wait(); err != nil { + return fmt.Errorf("migration failed: %w", err) + } + return context.Cause(ctx) +} + +func copyExactKeysFromBadgerToPebble(badgerDB *badger.DB, pebbleDB *pebble.DB, keys [][]byte) (int, error) { + batch := pebbleDB.NewBatch() + keyCount := 0 + err := badgerDB.View(func(txn *badger.Txn) error { + for _, key := range keys { + item, err := txn.Get(key) + if err != nil { + if errors.Is(err, badger.ErrKeyNotFound) { + // skip if the key is not found + continue + } + + return err + } + + err = item.Value(func(val []byte) error { + keyCount++ + return batch.Set(key, val, nil) + }) + + if err != nil { + return fmt.Errorf("failed to get value for key %x: %w", key, err) + } + } + + return nil + }) + + if err != nil { + return 0, fmt.Errorf("failed to get key from BadgerDB: %w", err) + } + + err = batch.Commit(pebble.Sync) + if err != nil { + return 0, fmt.Errorf("failed to commit batch to PebbleDB: %w", err) + 
} + + return keyCount, nil +} diff --git a/storage/migration/migration_test.go b/storage/migration/migration_test.go new file mode 100644 index 00000000000..3bdbfe1e310 --- /dev/null +++ b/storage/migration/migration_test.go @@ -0,0 +1,237 @@ +package migration + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +func TestGeneratePrefixes(t *testing.T) { + t.Run("OneBytePrefix", func(t *testing.T) { + prefixes := GeneratePrefixes(1) + require.Len(t, prefixes, 256) + require.Equal(t, []byte{0x00}, prefixes[0]) + require.Equal(t, []byte{0x01}, prefixes[1]) + require.Equal(t, []byte{0xfe}, prefixes[254]) + require.Equal(t, []byte{0xff}, prefixes[255]) + }) + + t.Run("TwoBytePrefix", func(t *testing.T) { + prefixes := GeneratePrefixes(2) + require.Len(t, prefixes, 65536) + require.Equal(t, []byte{0x00, 0x00}, prefixes[0]) + require.Equal(t, []byte{0x00, 0x01}, prefixes[1]) + require.Equal(t, []byte{0xff, 0xfe}, prefixes[65534]) + require.Equal(t, []byte{0xff, 0xff}, prefixes[65535]) + }) +} + +func runMigrationTestCase(t *testing.T, testData map[string]string, cfg MigrationConfig) { + unittest.RunWithBadgerDBAndPebbleDB(t, func(badgerDB *badger.DB, pebbleDB *pebble.DB) { + // Load Badger with test data + require.NoError(t, badgerDB.Update(func(txn *badger.Txn) error { + for k, v := range testData { + if err := txn.Set([]byte(k), []byte(v)); err != nil { + return err + } + } + return nil + })) + + // Run migration + err := CopyFromBadgerToPebbleSSTables(badgerDB, pebbleDB, cfg) + require.NoError(t, err) + + // Validate each key + for k, expected := range testData { + val, closer, err := pebbleDB.Get([]byte(k)) + require.NoError(t, err, "pebbleDB.Get failed for key %s", k) + require.Equal(t, expected, string(val), "mismatched value for key %s", k) + require.NoError(t, closer.Close()) + } + + // Validate: Ensure Pebble have no additional key + iter, err := pebbleDB.NewIter(nil) + require.NoError(t, err) + defer iter.Close() + + seen := make(map[string]string) + + for iter.First(); iter.Valid(); iter.Next() { + k := string(iter.Key()) + v := string(iter.Value()) + + expectedVal, ok := testData[k] + require.True(t, ok, "unexpected key found in PebbleDB: %s", k) + require.Equal(t, expectedVal, v, "mismatched value for key %s", k) + + seen[k] = v + } + require.NoError(t, iter.Error(), "error iterating over PebbleDB") + + // Ensure all expected keys were seen + require.Equal(t, len(testData), len(seen), "PebbleDB key count mismatch") + }) +} + +// Simple deterministic dataset +func TestMigrationWithSimpleData1(t *testing.T) { + data := map[string]string{ + "a": "a single key byte", + "z": "a single key byte", + "apple": "fruit", + "banana": "yellow", + "carrot": "vegetable", + "dog": "animal", + "egg": "protein", + } + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 1, + } + runMigrationTestCase(t, data, cfg) +} + +// Simple deterministic dataset +func TestMigrationWithSimpleDataAnd2PrefixBytes(t *testing.T) { + data := map[string]string{ + "a": "a single key byte", + "z": "a single key byte", + "apple": "fruit", + "banana": "yellow", + "carrot": "vegetable", + "dog": "animal", + "egg": "protein", + } + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, + } + runMigrationTestCase(t, data, cfg) +} + 
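+// TestMigrationWithEmptyDatabase is a minimal sketch of an additional edge case (the name and scenario are
+// illustrative additions, not part of the original suite): migrating an empty BadgerDB is expected to
+// succeed and leave PebbleDB empty. It reuses the runMigrationTestCase helper defined above.
+func TestMigrationWithEmptyDatabase(t *testing.T) {
+	data := map[string]string{}
+	cfg := MigrationConfig{
+		BatchByteSize:          1024,
+		ReaderWorkerCount:      2,
+		WriterWorkerCount:      2,
+		ReaderShardPrefixBytes: 1,
+	}
+	runMigrationTestCase(t, data, cfg)
+}
+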
+// Randomized data to simulate fuzzing +func TestMigrationWithFuzzyData(t *testing.T) { + data := generateRandomKVData(500, 10, 50) + cfg := MigrationConfig{ + BatchByteSize: 2048, + ReaderWorkerCount: 4, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 1, + } + runMigrationTestCase(t, data, cfg) +} + +// Fuzzy data with 2-byte prefix shard config +func TestMigrationWithFuzzyDataAndPrefix2(t *testing.T) { + data := generateRandomKVData(500, 10, 50) + cfg := MigrationConfig{ + BatchByteSize: 2048, + ReaderWorkerCount: 8, + WriterWorkerCount: 4, + ReaderShardPrefixBytes: 2, + } + runMigrationTestCase(t, data, cfg) +} + +// Utility: Generate random key-value pairs +func generateRandomKVData(count, keyLen, valLen int) map[string]string { + rng := rand.New(rand.NewSource(42)) // deterministic + data := make(map[string]string, count) + letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + randomStr := func(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rng.Intn(len(letters))] + } + return string(b) + } + + for i := 0; i < count; i++ { + k := randomStr(keyLen) + v := randomStr(valLen) + data[k] = v + } + return data +} + +func BenchmarkCopyFromBadgerToPebble(b *testing.B) { + // Configuration + const ( + numEntries = 1_000_000 + keySize = 16 + valueSize = 128 + batchByteSize = 4 * 1024 * 1024 // 4MB + readerWorkerCount = 4 + writerWorkerCount = 4 + prefixBytes = 1 + ) + + // Setup: Create temp dirs for Badger and Pebble + unittest.RunWithTempDirs(b, func(badgerDir, pebbleDir string) { + // Open Badger + badgerOpts := badger.DefaultOptions(badgerDir).WithLogger(nil) + badgerDB, err := badger.Open(badgerOpts) + if err != nil { + b.Fatalf("failed to open BadgerDB: %v", err) + } + defer badgerDB.Close() + + // Insert random data into Badger + rng := rand.New(rand.NewSource(42)) + batchSize := 100 + batchCount := numEntries / batchSize + for range batchSize { + err = badgerDB.Update(func(txn *badger.Txn) error { + for range batchCount { + key := make([]byte, keySize) + value := make([]byte, valueSize) + rng.Read(key) + rng.Read(value) + + if err := txn.Set(key, value); err != nil { + return fmt.Errorf("failed to set key %x: %w", key, err) + } + } + return nil + }) + if err != nil { + b.Fatalf("failed to insert data into BadgerDB: %v", err) + } + } + + // Open Pebble + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{}) + if err != nil { + b.Fatalf("failed to open PebbleDB: %v", err) + } + defer pebbleDB.Close() + + // Setup migration config + cfg := MigrationConfig{ + BatchByteSize: batchByteSize, + ReaderWorkerCount: readerWorkerCount, + WriterWorkerCount: writerWorkerCount, + ReaderShardPrefixBytes: prefixBytes, + } + + // Benchmark the migration + b.ResetTimer() + b.StartTimer() + if err := CopyFromBadgerToPebble(badgerDB, pebbleDB, cfg); err != nil { + b.Fatalf("migration failed: %v", err) + } + b.StopTimer() + }) +} diff --git a/storage/migration/runner.go b/storage/migration/runner.go new file mode 100644 index 00000000000..25f3ae3acda --- /dev/null +++ b/storage/migration/runner.go @@ -0,0 +1,146 @@ +package migration + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + + badgerstorage "github.com/onflow/flow-go/storage/badger" + pebblestorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/util" +) + +var DefaultMigrationConfig = MigrationConfig{ + BatchByteSize: 32_000_000, // 32 MB + 
ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, // better to keep it as 2. + ValidationMode: PartialValidation, // Default to partial validation +} + +func RunMigrationAndCompaction(badgerDir string, pebbleDir string, cfg MigrationConfig) error { + err := RunMigration(badgerDir, pebbleDir, cfg) + if err != nil { + return err + } + + err = ForceCompactPebbleDB(pebbleDir) + if err != nil { + return fmt.Errorf("failed to compact PebbleDB: %w", err) + } + + return nil +} + +// RunMigration performs a complete migration of key-value data from a BadgerDB directory +// to a PebbleDB directory and verifies the integrity of the migrated data. +// +// It executes the following steps: +// +// 1. Validates that the Badger directory exists and is non-empty. +// Ensures that the Pebble directory does not already contain data. +// 2. Opens both databases and runs the migration using CopyFromBadgerToPebble with the given config. +// 3. Writes a "MIGRATION_STARTED" marker file with a timestamp in the Pebble directory. +// 4. After migration, performs validation by: +// - For PartialValidation: Generates a list of prefix shards (based on 2-byte prefixes) +// and finds the min and max keys for each prefix group +// - For FullValidation: Validates all keys in the database +// 5. Writes a "MIGRATION_COMPLETED" marker file with a timestamp to signal successful completion. +// +// This function returns an error if any part of the process fails, including directory checks, +// database operations, or validation mismatches. +func RunMigration(badgerDir string, pebbleDir string, cfg MigrationConfig) error { + lg := log.With(). + Str("from-badger-dir", badgerDir). + Str("to-pebble-dir", pebbleDir). + Logger() + + // Step 1: Validate directories + lg.Info().Msg("Step 1/6: Starting directory validation...") + startTime := time.Now() + if !cfg.ValidationOnly { // when ValidationOnly is true, database folders can be not empty + if err := validateBadgerFolderExistPebbleFolderEmpty(badgerDir, pebbleDir); err != nil { + return fmt.Errorf("directory validation failed: %w", err) + } + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 1/6: Directory validation completed successfully") + + // Step 2: Open Badger and Pebble DBs + lg.Info().Msg("Step 2/6: Opening BadgerDB and PebbleDB...") + startTime = time.Now() + badgerOptions := badger.DefaultOptions(badgerDir). 
+ WithLogger(util.NewLogger(log.Logger.With().Str("db", "badger").Logger())) + badgerDB, err := badgerstorage.SafeOpen(badgerOptions) + if err != nil { + return fmt.Errorf("failed to open BadgerDB: %w", err) + } + defer badgerDB.Close() + + cache := pebble.NewCache(pebblestorage.DefaultPebbleCacheSize) + defer cache.Unref() + pebbleDBOpts := pebblestorage.DefaultPebbleOptions(log.Logger, cache, pebble.DefaultComparer) + pebbleDBOpts.DisableAutomaticCompactions = true + + pebbleDB, err := pebble.Open(pebbleDir, pebbleDBOpts) + if err != nil { + return fmt.Errorf("failed to open PebbleDB: %w", err) + } + defer pebbleDB.Close() + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 2/6: BadgerDB and PebbleDB opened successfully") + + if cfg.ValidationOnly { + lg.Info().Str("mode", string(cfg.ValidationMode)).Msg("Step 6/6 Validation only mode enabled, skipping migration steps, Starting data validation...") + startTime = time.Now() + if err := validateData(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("data validation failed: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 6/6: Data validation completed successfully") + + return nil + } + // Step 3: Write MIGRATION_STARTED file + lg.Info().Msg("Step 3/6: Writing migration start marker...") + startTime = time.Now() + startTimeStr := time.Now().Format(time.RFC3339) + startMarkerPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + startContent := fmt.Sprintf("migration started at %s\n", startTimeStr) + if err := os.WriteFile(startMarkerPath, []byte(startContent), 0644); err != nil { + return fmt.Errorf("failed to write MIGRATION_STARTED file: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Str("file", startMarkerPath).Msg("Step 3/6: Migration start marker written successfully") + + // Step 4: Migrate data + lg.Info().Msg("Step 4/6: Starting data migration...") + startTime = time.Now() + cfg.PebbleDir = pebbleDir + if err := CopyFromBadgerToPebbleSSTables(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("failed to migrate data from Badger to Pebble: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 4/6: Data migration completed successfully") + + // Step 5: Validate data + lg.Info().Str("mode", string(cfg.ValidationMode)).Msg("Step 5/6: Starting data validation...") + startTime = time.Now() + if err := validateData(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("data validation failed: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 5/6: Data validation completed successfully") + + // Step 6: Write MIGRATION_COMPLETED file + lg.Info().Msg("Step 6/6: Writing migration completion marker...") + startTime = time.Now() + endTime := time.Now().Format(time.RFC3339) + completeMarkerPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + completeContent := fmt.Sprintf("migration completed at %s\n", endTime) + if err := os.WriteFile(completeMarkerPath, []byte(completeContent), 0644); err != nil { + return fmt.Errorf("failed to write MIGRATION_COMPLETED file: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Str("file", completeMarkerPath).Msg("Step 6/6: Migration completion marker written successfully") + + return nil +} diff --git a/storage/migration/runner_test.go b/storage/migration/runner_test.go new file mode 100644 index 00000000000..ca852fb8f31 --- /dev/null +++ b/storage/migration/runner_test.go @@ -0,0 +1,142 @@ +package migration + +import ( + "os" + "path/filepath" + "testing" + + 
"github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" +) + +func TestRunMigration(t *testing.T) { + // Setup temporary directories + tmpDir := t.TempDir() + badgerDir := filepath.Join(tmpDir, "badger") + pebbleDir := filepath.Join(tmpDir, "pebble") + + // Create and open BadgerDB with test data + opts := badger.DefaultOptions(badgerDir).WithLogger(nil) + badgerDB, err := badger.Open(opts) + require.NoError(t, err) + + testData := map[string]string{ + "\x01\x02foo": "bar", + "\x01\x02baz": "qux", + "\x02\xffzip": "zap", + "\xff\xffzz": "last", + } + err = badgerDB.Update(func(txn *badger.Txn) error { + for k, v := range testData { + err := txn.Set([]byte(k), []byte(v)) + require.NoError(t, err) + } + return nil + }) + require.NoError(t, err) + require.NoError(t, badgerDB.Close()) // Close so MigrateAndValidate can reopen it + + // Define migration config + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, + ValidationMode: PartialValidation, + } + + // Run migration + err = RunMigration(badgerDir, pebbleDir, cfg) + require.NoError(t, err) + + // Check marker files + startedPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + completedPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + + startedContent, err := os.ReadFile(startedPath) + require.NoError(t, err) + require.Contains(t, string(startedContent), "migration started") + + completedContent, err := os.ReadFile(completedPath) + require.NoError(t, err) + require.Contains(t, string(completedContent), "migration completed") + + // Open PebbleDB to confirm migrated values + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer pebbleDB.Close() + + for k, expected := range testData { + val, closer, err := pebbleDB.Get([]byte(k)) + require.NoError(t, err) + require.Equal(t, expected, string(val)) + require.NoError(t, closer.Close()) + } +} + +func TestRunMigration_FullValidation(t *testing.T) { + // Setup temporary directories + tmpDir := t.TempDir() + badgerDir := filepath.Join(tmpDir, "badger") + pebbleDir := filepath.Join(tmpDir, "pebble") + + // Generate random test data + testData := generateRandomKVData(200, 8, 16) + + // Create and open BadgerDB with test data + opts := badger.DefaultOptions(badgerDir).WithLogger(nil) + badgerDB, err := badger.Open(opts) + require.NoError(t, err) + + err = badgerDB.Update(func(txn *badger.Txn) error { + for k, v := range testData { + err := txn.Set([]byte(k), []byte(v)) + require.NoError(t, err) + } + return nil + }) + require.NoError(t, err) + require.NoError(t, badgerDB.Close()) // Close so MigrateAndValidate can reopen it + + // Define migration config with FullValidation + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, + ValidationMode: FullValidation, + } + + // Run migration + err = RunMigration(badgerDir, pebbleDir, cfg) + require.NoError(t, err) + + // Check marker files + startedPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + completedPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + + startedContent, err := os.ReadFile(startedPath) + require.NoError(t, err) + require.Contains(t, string(startedContent), "migration started") + + completedContent, err := os.ReadFile(completedPath) + require.NoError(t, err) + require.Contains(t, string(completedContent), "migration completed") + + // 
Open PebbleDB to confirm migrated values + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer pebbleDB.Close() + + for k, expected := range testData { + val, closer, err := pebbleDB.Get([]byte(k)) + require.NoError(t, err) + require.Equal(t, expected, string(val)) + require.NoError(t, closer.Close()) + } +} diff --git a/storage/migration/sstables.go b/storage/migration/sstables.go new file mode 100644 index 00000000000..45c85802046 --- /dev/null +++ b/storage/migration/sstables.go @@ -0,0 +1,168 @@ +package migration + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/objstorage/objstorageprovider" + "github.com/cockroachdb/pebble/v2/sstable" + "github.com/cockroachdb/pebble/v2/vfs" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/module/util" +) + +// CopyFromBadgerToPebble copies all key-value pairs from a BadgerDB to a PebbleDB +// using SSTable ingestion. It reads BadgerDB in prefix-sharded ranges and writes +// those ranges into SSTable files, which are then ingested into Pebble. +func CopyFromBadgerToPebbleSSTables(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error { + sstableDir, err := os.MkdirTemp(cfg.PebbleDir, "flow-migration-temp-") + if err != nil { + return fmt.Errorf("failed to create temp dir: %w", err) + } + + log.Info().Msgf("Created temporary directory for SSTables: %s", sstableDir) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + errOnce sync.Once + firstErr error + ) + + // once running into an exception, cancel the context and report the first error + reportFirstError := func(err error) { + if err != nil { + errOnce.Do(func() { + firstErr = err + cancel() + }) + } + } + + // Step 1: Copy all keys shorter than prefix + keysShorterThanPrefix := GenerateKeysShorterThanPrefix(cfg.ReaderShardPrefixBytes) + keyCount, err := copyExactKeysFromBadgerToPebble(badgerDB, pebbleDB, keysShorterThanPrefix) + if err != nil { + return fmt.Errorf("failed to copy keys shorter than prefix: %w", err) + } + log.Info().Msgf("Copied %d keys shorter than %v bytes prefix", keyCount, cfg.ReaderShardPrefixBytes) + + // Step 2: Copy all keys with prefix by first generating prefix shards and then + // using reader and writer workers to copy the keys with the same prefix + prefixes := GeneratePrefixes(cfg.ReaderShardPrefixBytes) + prefixJobs := make(chan []byte, len(prefixes)) + for _, prefix := range prefixes { + prefixJobs <- prefix + } + close(prefixJobs) + + kvChan := make(chan KVPairs, cfg.ReaderWorkerCount*2) + + lg := util.LogProgress( + log.Logger, + util.DefaultLogProgressConfig("migration keys from badger to pebble", len(prefixes)), + ) + + var readerWg sync.WaitGroup + for i := 0; i < cfg.ReaderWorkerCount; i++ { + readerWg.Add(1) + go func() { + defer readerWg.Done() + if err := readerWorker(ctx, lg, badgerDB, prefixJobs, kvChan, cfg.BatchByteSize); err != nil { + reportFirstError(err) + } + }() + } + + var writerWg sync.WaitGroup + for i := 0; i < cfg.WriterWorkerCount; i++ { + writerWg.Add(1) + go func() { + defer writerWg.Done() + if err := writerSSTableWorker(ctx, i, pebbleDB, sstableDir, kvChan); err != nil { + reportFirstError(err) + } + }() + } + + // Close kvChan after readers complete + go func() { + readerWg.Wait() + close(kvChan) + }() + + writerWg.Wait() + return firstErr +} + +func writerSSTableWorker(ctx 
context.Context, workerIndex int, db *pebble.DB, sstableDir string, kvChan <-chan KVPairs) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case kvGroup, ok := <-kvChan: + if !ok { + return nil + } + + filePath := fmt.Sprintf("%s/prefix_%x_worker_%v.sst", sstableDir, kvGroup.Prefix, workerIndex) + writer, err := createSSTableWriter(filePath) + if err != nil { + return err + } + + for _, kv := range kvGroup.Pairs { + if err := writer.Set(kv.Key, kv.Value); err != nil { + return fmt.Errorf("fail to set key %x: %w", kv.Key, err) + } + } + + if err := writer.Close(); err != nil { + return fmt.Errorf("fail to close writer: %w", err) + } + + err = db.Ingest(ctx, []string{filePath}) + if err != nil { + return fmt.Errorf("fail to ingest file %v: %w", filePath, err) + } + + log.Info().Msgf("Ingested SSTable file: %s", filePath) + } + } +} +func createSSTableWriter(filePath string) (*sstable.Writer, error) { + f, err := vfs.Default.Create(filePath, vfs.WriteCategoryUnspecified) + if err != nil { + return nil, err + } + + writable := objstorageprovider.NewFileWritable(f) + sstWriter := sstable.NewWriter(writable, sstable.WriterOptions{ + // pebble 1 is using TableFormatPebblev4, pebble 2's latest is TableFormatPebblev5 (TableFormatMax) + // in order to be compatible with pebble 1, we use TableFormatPebblev4 for now. + // TODO: use TableFormatMax in next spork + // TableFormat: sstable.TableFormatMax, + TableFormat: sstable.TableFormatPebblev4, + }) + + return sstWriter, nil +} + +func ForceCompactPebbleDB(pebbleDir string) error { + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + // TODO: use FormatNewest in next spork + // FormatMajorVersion: pebble.FormatNewest, + FormatMajorVersion: pebble.FormatVirtualSSTables, + }) + if err != nil { + return err + } + + return pebbleDB.Compact([]byte{0x00}, []byte{0xff}, true) +} diff --git a/storage/migration/sstables_test.go b/storage/migration/sstables_test.go new file mode 100644 index 00000000000..a9201486862 --- /dev/null +++ b/storage/migration/sstables_test.go @@ -0,0 +1,63 @@ +package migration + +import ( + "context" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/objstorage/objstorageprovider" + "github.com/cockroachdb/pebble/v2/sstable" + "github.com/cockroachdb/pebble/v2/vfs" + "github.com/stretchr/testify/require" +) + +func TestPebbleSSTableIngest(t *testing.T) { + // Create a temporary directory for the Pebble DB + dir, err := os.MkdirTemp("", "pebble-test") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // Open Pebble DB + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer db.Close() + + // Create an SSTable with a few key-values + sstPath := filepath.Join(dir, "test.sst") + file, err := vfs.Default.Create(sstPath, vfs.WriteCategoryUnspecified) + require.NoError(t, err) + writable := objstorageprovider.NewFileWritable(file) + writer := sstable.NewWriter(writable, sstable.WriterOptions{ + TableFormat: sstable.TableFormatMax, + }) + data := generateRandomKVData(500, 10, 50) + + // Sort the keys to ensure strictly increasing order + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + require.NoError(t, writer.Set([]byte(k), []byte(data[k]))) + } + + require.NoError(t, writer.Close()) + + // Ingest the SSTable into Pebble DB + require.NoError(t, 
db.Ingest(context.Background(), []string{sstPath})) + + // Verify the data exists + for _, k := range keys { + val, closer, err := db.Get([]byte(k)) + require.NoError(t, err, "expected key %s to exist", k) + require.Equal(t, data[k], string(val)) + closer.Close() + } +} diff --git a/storage/migration/validation.go b/storage/migration/validation.go new file mode 100644 index 00000000000..b78732ec664 --- /dev/null +++ b/storage/migration/validation.go @@ -0,0 +1,390 @@ +package migration + +import ( + "bytes" + "context" + "fmt" + "os" + "slices" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/storage" +) + +// ValidationMode defines how thorough the validation should be +type ValidationMode string + +const ( + // PartialValidation only checks min/max keys for each prefix + PartialValidation ValidationMode = "partial" + // FullValidation checks all keys in the database + FullValidation ValidationMode = "full" +) + +const batchSize = 10 + +func ParseValidationModeValid(mode string) (ValidationMode, error) { + switch mode { + case string(PartialValidation): + return PartialValidation, nil + case string(FullValidation): + return FullValidation, nil + default: + return "", fmt.Errorf("invalid validation mode: %s", mode) + } +} + +// isDirEmpty checks if a directory exists and is empty. +// Returns true if the directory is empty, false if it contains files, +// and an error if the directory doesn't exist or there's an error reading it. +func isDirEmpty(dir string) (bool, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return false, err + } + return len(entries) == 0, nil +} + +// createDirIfNotExists creates a directory if it doesn't exist. +// Returns an error if the directory already exists and is not empty, +// or if there's an error creating the directory. +func createDirIfNotExists(dir string) error { + if stat, err := os.Stat(dir); err == nil { + if !stat.IsDir() { + return fmt.Errorf("path exists but is not a directory: %s", dir) + } + isEmpty, err := isDirEmpty(dir) + if err != nil { + return fmt.Errorf("failed to check if directory is empty: %w", err) + } + if !isEmpty { + return fmt.Errorf("directory exists and is not empty: %s", dir) + } + return nil + } else if !os.IsNotExist(err) { + return fmt.Errorf("error checking directory: %w", err) + } + + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + return nil +} + +// validateBadgerFolderExistPebbleFolderEmpty checks if the Badger directory exists and is non-empty, +// and if the Pebble directory does not exist or is empty. 
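+// If the Pebble directory does not exist yet, it is created.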
+func validateBadgerFolderExistPebbleFolderEmpty(badgerDir string, pebbleDir string) error { + // Step 1.1: Ensure Badger directory exists and is non-empty + isEmpty, err := isDirEmpty(badgerDir) + if err != nil { + return fmt.Errorf("badger directory invalid: %w", err) + } + if isEmpty { + return fmt.Errorf("badger directory is empty, %v", badgerDir) + } + + // Step 1.2: Ensure Pebble directory does not exist or is empty + if err := createDirIfNotExists(pebbleDir); err != nil { + return fmt.Errorf("pebble directory validation failed %v: %w", pebbleDir, err) + } + + return nil +} + +func validateMinMaxKeyConsistency(badgerDB *badger.DB, pebbleDB *pebble.DB, prefixBytes int) error { + keys, err := sampleValidationKeysByPrefix(badgerDB, prefixBytes) + if err != nil { + return fmt.Errorf("failed to collect validation keys: %w", err) + } + if err := compareValuesBetweenDBs(keys, badgerDB, pebbleDB); err != nil { + return fmt.Errorf("data mismatch found: %w", err) + } + return nil +} + +// sampleValidationKeysByPrefix takes a prefix bytes number (1 means 1 byte prefix, 2 means 2 bytes prefix, etc.), +// and returns a list of keys that are the min and max keys for each prefix. +// The output will be used to validate the consistency between Badger and Pebble databases. +// Why? Because we want to validate the consistency between Badger and Pebble databases by selecting +// some keys and compare their values between the two databases. +// An easy way to select keys is to go through each prefix, and find the min and max keys for each prefix using +// the database iterator. +func sampleValidationKeysByPrefix(db *badger.DB, prefixBytes int) ([][]byte, error) { + // this includes all prefixes that is shorter than or equal to prefixBytes + // for instance, if prefixBytes is 2, we will include all prefixes that is 1 byte or 2 bytes: + // [ + // [0x00], [0x01], [0x02], ..., [0xff], // 1 byte prefixes + // [0x00, 0x00], [0x00, 0x01], [0x00, 0x02], ..., [0xff, 0xff] // 2 byte prefixes + // ] + prefixes := GenerateKeysShorterThanPrefix(prefixBytes + 1) + var allKeys [][]byte + + err := db.View(func(txn *badger.Txn) error { + for _, prefix := range prefixes { + // Find min key + opts := badger.DefaultIteratorOptions + it := txn.NewIterator(opts) + it.Seek(prefix) + if it.ValidForPrefix(prefix) { + allKeys = append(allKeys, slices.Clone(it.Item().Key())) + } + it.Close() + + // Find max key with reverse iterator + opts.Reverse = true + it = txn.NewIterator(opts) + + // the upper bound is exclusive, so we need to seek to the upper bound + // when the prefix is [0xff,0xff], the end is nil, and we will iterate + // from the last key + end := storage.PrefixUpperBound(prefix) + it.Seek(end) + if it.ValidForPrefix(prefix) { + allKeys = append(allKeys, slices.Clone(it.Item().Key())) + } + it.Close() + } + return nil + }) + if err != nil { + return nil, err + } + + // Deduplicate keys + keyMap := make(map[string][]byte, len(allKeys)) + for _, k := range allKeys { + keyMap[string(k)] = k + } + uniqueKeys := make([][]byte, 0, len(keyMap)) + for _, k := range keyMap { + uniqueKeys = append(uniqueKeys, k) + } + + return uniqueKeys, nil +} + +// compareValuesBetweenDBs takes a list of keys and compares the values between Badger and Pebble databases, +// it returns error if any of the values are different. 
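+// A mismatch is reported as an error wrapping storage.ErrDataMismatch, which includes the offending key and
+// both values.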
+func compareValuesBetweenDBs(keys [][]byte, badgerDB *badger.DB, pebbleDB *pebble.DB) error { + for _, key := range keys { + var badgerVal []byte + err := badgerDB.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + badgerVal, err = item.ValueCopy(nil) + return err + }) + if err != nil { + return fmt.Errorf("badger get error for key %x: %w", key, err) + } + + pebbleVal, closer, err := pebbleDB.Get(key) + if err != nil { + return fmt.Errorf("pebble get error for key %x: %w", key, err) + } + if string(pebbleVal) != string(badgerVal) { + return fmt.Errorf("value mismatch for key %x: badger=%q pebble=%q: %w", key, badgerVal, pebbleVal, + storage.ErrDataMismatch) + } + _ = closer.Close() + } + return nil +} + +// validateData performs validation based on the configured validation mode +func validateData(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error { + switch cfg.ValidationMode { + case PartialValidation: + return validateMinMaxKeyConsistency(badgerDB, pebbleDB, cfg.ReaderShardPrefixBytes) + case FullValidation: + return validateAllKeys(badgerDB, pebbleDB) + default: + return fmt.Errorf("unknown validation mode: %s", cfg.ValidationMode) + } +} + +// validateAllKeys performs a full validation by comparing all keys between Badger and Pebble +func validateAllKeys(badgerDB *badger.DB, pebbleDB *pebble.DB) error { + // Use the same prefix sharding as migration.go (default: 1 byte, but could be configurable) + const prefixBytes = 1 // or make this configurable if needed + prefixes := GeneratePrefixes(prefixBytes) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eg, ctx := errgroup.WithContext(ctx) + + lg := util.LogProgress( + log.Logger, + util.DefaultLogProgressConfig("verifying progress", len(prefixes)), + ) + + for _, prefix := range prefixes { + curPrefix := prefix // capture range variable + + eg.Go(func() error { + defer lg(1) + start := time.Now() + + // Channels for key-value pairs from Badger and Pebble + kvChanBadger := make(chan KVPairs, 1) + kvChanPebble := make(chan KVPairs, 1) + + // Progress logger (no-op for now) + // Start Badger reader worker + badgerErrCh := make(chan error, 1) + + // By wrapping a single prefix in a channel, badger worker and pebble worker can work on the same prefix. + go func() { + err := readerWorker(ctx, lg, badgerDB, singlePrefixChan(curPrefix), kvChanBadger, batchSize) + close(kvChanBadger) + badgerErrCh <- err + }() + + // each worker only process 1 prefix, so no need to log the progress. + // The progress is logged by the main goroutine + noopLogging := func(int) {} + // Start Pebble reader worker + pebbleErrCh := make(chan error, 1) + go func() { + err := pebbleReaderWorker(ctx, noopLogging, pebbleDB, singlePrefixChan(curPrefix), kvChanPebble, batchSize) + close(kvChanPebble) + pebbleErrCh <- err + }() + + // Compare outputs + err := compareKeyValuePairsFromChannels(ctx, kvChanBadger, kvChanPebble) + + // Wait for workers to finish and check for errors + badgerErr := <-badgerErrCh + pebbleErr := <-pebbleErrCh + + if badgerErr != nil { + return fmt.Errorf("badger reader error for prefix %x: %w", curPrefix, badgerErr) + } + if pebbleErr != nil { + return fmt.Errorf("pebble reader error for prefix %x: %w", curPrefix, pebbleErr) + } + if err != nil { + return fmt.Errorf("comparison error for prefix %x: %w", curPrefix, err) + } + + log.Info().Str("prefix", fmt.Sprintf("%x", curPrefix)). 
+ Msgf("successfully validated prefix in %s", time.Since(start)) + return nil + }) + } + + if err := eg.Wait(); err != nil { + return err + } + return nil +} + +// singlePrefixChan returns a channel that yields a single prefix and then closes. +// Usage: This function is used in validateAllKeys when launching reader workers (e.g., readerWorker and pebbleReaderWorker) +// for the same prefix. +func singlePrefixChan(prefix []byte) <-chan []byte { + ch := make(chan []byte, 1) + ch <- prefix + close(ch) + return ch +} + +// compare the key value pairs from both channel, and return error if any pair is different, +// and return error if ctx is Done +func compareKeyValuePairsFromChannels(ctx context.Context, kvChanBadger <-chan KVPairs, kvChanPebble <-chan KVPairs) error { + var ( + kvBadger, kvPebble KVPairs + okBadger, okPebble bool + ) + + for { + // Read from both channels + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while reading from badger: %w", ctx.Err()) + case kvBadger, okBadger = <-kvChanBadger: + if !okBadger { + kvBadger = KVPairs{} + } + } + + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while reading from pebble: %w", ctx.Err()) + case kvPebble, okPebble = <-kvChanPebble: + if !okPebble { + kvPebble = KVPairs{} + } + } + + // If both channels are closed, we're done + if !okBadger && !okPebble { + break + } + + // Handle case where Badger channel is closed, Pebble channel is not. + if !okBadger && okPebble { + if len(kvPebble.Pairs) > 0 { + return fmt.Errorf("key %x exists in pebble but not in badger", kvPebble.Pairs[0].Key) + } + return fmt.Errorf("okBadger == false, okPebble == true, but okPebble has no keys") + } + + // Handle case where Pebble channel is closed, Badger channel is not. + if okBadger && !okPebble { + if len(kvBadger.Pairs) > 0 { + return fmt.Errorf("key %x exists in badger but not in pebble", kvBadger.Pairs[0].Key) + } + return fmt.Errorf("okBadger == true, okPebble == false, but okBadger has no keys") + } + + // Both channels are open, compare prefixes + if !bytes.Equal(kvBadger.Prefix, kvPebble.Prefix) { + return fmt.Errorf("prefix mismatch: badger=%x, pebble=%x", kvBadger.Prefix, kvPebble.Prefix) + } + + // Compare key-value pairs + i, j := 0, 0 + for i < len(kvBadger.Pairs) && j < len(kvPebble.Pairs) { + pairBadger := kvBadger.Pairs[i] + pairPebble := kvPebble.Pairs[j] + + cmp := bytes.Compare(pairBadger.Key, pairPebble.Key) + if cmp < 0 { + return fmt.Errorf("key %x exists in badger but not in pebble", pairBadger.Key) + } + if cmp > 0 { + return fmt.Errorf("key %x exists in pebble but not in badger", pairPebble.Key) + } + + // Keys are equal, compare values + if !bytes.Equal(pairBadger.Value, pairPebble.Value) { + return fmt.Errorf("value mismatch for key %x: badger=%x, pebble=%x", + pairBadger.Key, pairBadger.Value, pairPebble.Value) + } + + i++ + j++ + } + + // Check if there are remaining pairs in either channel + if i < len(kvBadger.Pairs) { + return fmt.Errorf("key %x exists in badger but not in pebble", kvBadger.Pairs[i].Key) + } + if j < len(kvPebble.Pairs) { + return fmt.Errorf("key %x exists in pebble but not in badger", kvPebble.Pairs[j].Key) + } + } + + return nil +} diff --git a/storage/migration/validation_test.go b/storage/migration/validation_test.go new file mode 100644 index 00000000000..f7abf578586 --- /dev/null +++ b/storage/migration/validation_test.go @@ -0,0 +1,143 @@ +package migration + +import ( + "context" + "sort" + "testing" + + "github.com/dgraph-io/badger/v2" + 
"github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSampleValidationKeysByPrefix(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + // Insert test keys + testKeys := []string{ + "\x01\x02keyA", // group: 0x01, 0x02 + "\x01\x02keyB", + "\x01\x03keyC", // group: 0x01, 0x03 + "\x02\x00keyD", // group: 0x02, 0x00 + "\x02\x00keyE", // group: 0x02, 0x00 + "\x02\x00keyF", // group: 0x02, 0x00 + "\xff\xfflast", + } + require.NoError(t, db.Update(func(txn *badger.Txn) error { + for _, k := range testKeys { + err := txn.Set([]byte(k), []byte("val_"+k)) + require.NoError(t, err) + } + return nil + })) + + // Run key collection + keys, err := sampleValidationKeysByPrefix(db, 2) + require.NoError(t, err) + + // Convert to string for easier comparison + var keyStrs []string + for _, k := range keys { + keyStrs = append(keyStrs, string(k)) + } + sort.Strings(keyStrs) + + // Expected keys are min and max for each 2-byte prefix group + expected := []string{ + "\x01\x02keyA", "\x01\x02keyB", // same group have both min and max + "\x01\x03keyC", // only one key in this group + "\x02\x00keyD", // min key of 0x02,0x00 + "\x02\x00keyF", // max key of 0x02,0x00 + "\xff\xfflast", // last key in this prefix + } + sort.Strings(expected) + require.ElementsMatch(t, expected, keyStrs) + }) +} + +func TestCompareKeyValuePairsFromChannels(t *testing.T) { + type testCase struct { + name string + badgerKVs []KVPairs + pebbleKVs []KVPairs + expectErr string // substring to match in error, or empty for no error + } + + prefix := []byte("pfx") + key1 := []byte("key1") + val1 := []byte("val1") + key2 := []byte("key2") + val2 := []byte("val2") + val2diff := []byte("DIFF") + + tests := []testCase{ + { + name: "matching pairs", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + expectErr: "", + }, + { + name: "value mismatch", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2diff}}}}, + expectErr: "value mismatch for key", + }, + { + name: "key missing in pebble", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "key 6b657932 exists in badger but not in pebble", + }, + { + name: "key missing in badger", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + expectErr: "key 6b657932 exists in pebble but not in badger", + }, + { + name: "prefix mismatch", + badgerKVs: []KVPairs{{Prefix: []byte("pfx1"), Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: []byte("pfx2"), Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "prefix mismatch", + }, + { + name: "context cancelled", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "context cancelled", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + badgerCh := make(chan KVPairs, len(tc.badgerKVs)) + pebbleCh := make(chan KVPairs, len(tc.pebbleKVs)) + + for _, kv := range tc.badgerKVs { + badgerCh <- kv + } + close(badgerCh) + for _, kv := range tc.pebbleKVs { + pebbleCh <- kv + } + close(pebbleCh) + + if tc.name == "context cancelled" { + // Cancel context before running + cancel() + } + + err := compareKeyValuePairsFromChannels(ctx, badgerCh, pebbleCh) + if tc.expectErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectErr) + } + }) + } +} diff --git a/storage/mock/batch.go b/storage/mock/batch.go index 5341b44f3b0..6c1edc6e14a 100644 --- a/storage/mock/batch.go +++ b/storage/mock/batch.go @@ -5,8 +5,6 @@ package mock import ( storage "github.com/onflow/flow-go/storage" mock "github.com/stretchr/testify/mock" - - sync "sync" ) // Batch is an autogenerated mock type for the Batch type @@ -75,11 +73,6 @@ func (_m *Batch) GlobalReader() storage.Reader { return r0 } -// Lock provides a mock function with given fields: _a0 -func (_m *Batch) Lock(_a0 *sync.Mutex) { - _m.Called(_a0) -} - // Writer provides a mock function with no fields func (_m *Batch) Writer() storage.Writer { ret := _m.Called() diff --git a/storage/mock/blocks.go b/storage/mock/blocks.go index 5b17b7e6509..d6924acfab2 100644 --- a/storage/mock/blocks.go +++ b/storage/mock/blocks.go @@ -3,10 +3,12 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // Blocks is an autogenerated mock type for the Blocks type @@ -14,6 +16,24 @@ type Blocks struct { mock.Mock } +// BatchStore provides a mock function with given fields: lctx, rw, block +func (_m *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, block *flow.Block) error { + ret := _m.Called(lctx, rw, block) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, *flow.Block) error); ok { + r0 = rf(lctx, rw, block) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByCollectionID provides a mock function with given fields: collID func (_m *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { ret := _m.Called(collID) @@ -122,44 +142,6 @@ func (_m *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []fl return r0 } -// Store provides a mock function with given fields: block -func (_m *Blocks) Store(block *flow.Block) error { - ret := _m.Called(block) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Block) error); ok { - r0 = rf(block) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StoreTx provides a mock function with given fields: block -func (_m *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error { - ret := _m.Called(block) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.Block) func(*transaction.Tx) error); ok { - r0 = rf(block) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewBlocks creates a new instance of Blocks. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewBlocks(t interface { diff --git a/storage/mock/cluster_blocks.go b/storage/mock/cluster_blocks.go index c23de43e05b..4e00a57d037 100644 --- a/storage/mock/cluster_blocks.go +++ b/storage/mock/cluster_blocks.go @@ -74,24 +74,6 @@ func (_m *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { return r0, r1 } -// Store provides a mock function with given fields: block -func (_m *ClusterBlocks) Store(block *cluster.Block) error { - ret := _m.Called(block) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*cluster.Block) error); ok { - r0 = rf(block) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewClusterBlocks creates a new instance of ClusterBlocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewClusterBlocks(t interface { diff --git a/storage/mock/cluster_payloads.go b/storage/mock/cluster_payloads.go index 57665c708b5..c2675dc115f 100644 --- a/storage/mock/cluster_payloads.go +++ b/storage/mock/cluster_payloads.go @@ -44,24 +44,6 @@ func (_m *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, return r0, r1 } -// Store provides a mock function with given fields: blockID, payload -func (_m *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error { - ret := _m.Called(blockID, payload) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *cluster.Payload) error); ok { - r0 = rf(blockID, payload) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewClusterPayloads creates a new instance of ClusterPayloads. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
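The regenerated mockery mocks above all follow the same pattern: the `New<Type>(t)` constructor registers a cleanup assertion, and expectations are set with testify's `On`/`Return`. A minimal sketch of wiring the new lock-context-aware `Blocks.BatchStore` (the test and variable names are illustrative; the `mock.Anything` matchers and nil arguments are only placeholders for a real `lockctx.Proof`, batch writer, and block):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestBlocksBatchStoreExpectation(t *testing.T) {
	blocks := storagemock.NewBlocks(t) // cleanup assertion is registered by the constructor

	// Accept any lock proof, batch writer, and block; return success.
	blocks.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil)

	// The code under test would call BatchStore with a real lockctx.Proof and batch;
	// nil arguments are used here only to show the expectation wiring.
	require.NoError(t, blocks.BatchStore(nil, nil, nil))
}
```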
func NewClusterPayloads(t interface { diff --git a/storage/mock/collections.go b/storage/mock/collections.go index 42a4c774234..3d19cbdf4a7 100644 --- a/storage/mock/collections.go +++ b/storage/mock/collections.go @@ -3,8 +3,12 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" ) // Collections is an autogenerated mock type for the Collections type @@ -12,6 +16,34 @@ type Collections struct { mock.Mock } +// BatchStoreAndIndexByTransaction provides a mock function with given fields: lctx, collection, batch +func (_m *Collections) BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, batch storage.ReaderBatchWriter) (flow.LightCollection, error) { + ret := _m.Called(lctx, collection, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStoreAndIndexByTransaction") + } + + var r0 flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) (flow.LightCollection, error)); ok { + return rf(lctx, collection, batch) + } + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) flow.LightCollection); ok { + r0 = rf(lctx, collection, batch) + } else { + r0 = ret.Get(0).(flow.LightCollection) + } + + if rf, ok := ret.Get(1).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) error); ok { + r1 = rf(lctx, collection, batch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ByID provides a mock function with given fields: collID func (_m *Collections) ByID(collID flow.Identifier) (*flow.Collection, error) { ret := _m.Called(collID) @@ -121,39 +153,59 @@ func (_m *Collections) Remove(collID flow.Identifier) error { } // Store provides a mock function with given fields: collection -func (_m *Collections) Store(collection *flow.Collection) error { +func (_m *Collections) Store(collection *flow.Collection) (flow.LightCollection, error) { ret := _m.Called(collection) if len(ret) == 0 { panic("no return value specified for Store") } - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Collection) error); ok { + var r0 flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(*flow.Collection) (flow.LightCollection, error)); ok { + return rf(collection) + } + if rf, ok := ret.Get(0).(func(*flow.Collection) flow.LightCollection); ok { r0 = rf(collection) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(flow.LightCollection) } - return r0 + if rf, ok := ret.Get(1).(func(*flow.Collection) error); ok { + r1 = rf(collection) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// StoreLightAndIndexByTransaction provides a mock function with given fields: collection -func (_m *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { - ret := _m.Called(collection) +// StoreAndIndexByTransaction provides a mock function with given fields: lctx, collection +func (_m *Collections) StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (flow.LightCollection, error) { + ret := _m.Called(lctx, collection) if len(ret) == 0 { - panic("no return value specified for StoreLightAndIndexByTransaction") + panic("no return value specified for StoreAndIndexByTransaction") } - var r0 error - if rf, ok := ret.Get(0).(func(*flow.LightCollection) error); ok { - r0 = rf(collection) + var r0 flow.LightCollection + var r1 error + if rf, ok := 
ret.Get(0).(func(lockctx.Proof, *flow.Collection) (flow.LightCollection, error)); ok { + return rf(lctx, collection) + } + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection) flow.LightCollection); ok { + r0 = rf(lctx, collection) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(flow.LightCollection) } - return r0 + if rf, ok := ret.Get(1).(func(lockctx.Proof, *flow.Collection) error); ok { + r1 = rf(lctx, collection) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewCollections creates a new instance of Collections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/storage/mock/commits.go b/storage/mock/commits.go index 5c06c3c517b..5c09f4cde5f 100644 --- a/storage/mock/commits.go +++ b/storage/mock/commits.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -32,17 +34,17 @@ func (_m *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.R return r0 } -// BatchStore provides a mock function with given fields: blockID, commit, batch -func (_m *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch storage.ReaderBatchWriter) error { - ret := _m.Called(blockID, commit, batch) +// BatchStore provides a mock function with given fields: lctx, blockID, commit, batch +func (_m *Commits) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, commit, batch) if len(ret) == 0 { panic("no return value specified for BatchStore") } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.StateCommitment, storage.ReaderBatchWriter) error); ok { - r0 = rf(blockID, commit, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, flow.StateCommitment, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, commit, batch) } else { r0 = ret.Error(0) } @@ -80,24 +82,6 @@ func (_m *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, err return r0, r1 } -// Store provides a mock function with given fields: blockID, commit -func (_m *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error { - ret := _m.Called(blockID, commit) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.StateCommitment) error); ok { - r0 = rf(blockID, commit) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewCommits creates a new instance of Commits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
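Since `Collections.Store` and `StoreAndIndexByTransaction` now return the stored `flow.LightCollection` alongside the error, call sites change shape as well. A hedged sketch of an updated caller (the `storeCollection` helper is illustrative, not part of this diff; it only assumes the `storage.Collections` interface reflected by the mock above):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeCollection shows the updated call shape: the stored flow.LightCollection is
// returned alongside the error instead of being re-derived by the caller.
func storeCollection(collections storage.Collections, col *flow.Collection) (flow.LightCollection, error) {
	light, err := collections.Store(col)
	if err != nil {
		return flow.LightCollection{}, fmt.Errorf("could not store collection: %w", err)
	}
	// The light collection (transaction IDs only) can now be used for indexing.
	return light, nil
}
```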
func NewCommits(t interface { diff --git a/storage/mock/consumer_progress.go b/storage/mock/consumer_progress.go index ed72e3ce596..839b961253e 100644 --- a/storage/mock/consumer_progress.go +++ b/storage/mock/consumer_progress.go @@ -2,13 +2,34 @@ package mock -import mock "github.com/stretchr/testify/mock" +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) // ConsumerProgress is an autogenerated mock type for the ConsumerProgress type type ConsumerProgress struct { mock.Mock } +// BatchSetProcessedIndex provides a mock function with given fields: processed, batch +func (_m *ConsumerProgress) BatchSetProcessedIndex(processed uint64, batch storage.ReaderBatchWriter) error { + ret := _m.Called(processed, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchSetProcessedIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, storage.ReaderBatchWriter) error); ok { + r0 = rf(processed, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ProcessedIndex provides a mock function with no fields func (_m *ConsumerProgress) ProcessedIndex() (uint64, error) { ret := _m.Called() diff --git a/storage/mock/db.go b/storage/mock/db.go index 4f4632b636d..3db4cdf6639 100644 --- a/storage/mock/db.go +++ b/storage/mock/db.go @@ -12,6 +12,24 @@ type DB struct { mock.Mock } +// Close provides a mock function with no fields +func (_m *DB) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // NewBatch provides a mock function with no fields func (_m *DB) NewBatch() storage.Batch { ret := _m.Called() diff --git a/storage/mock/epoch_commits.go b/storage/mock/epoch_commits.go index 67d3704762f..9cb75c61777 100644 --- a/storage/mock/epoch_commits.go +++ b/storage/mock/epoch_commits.go @@ -6,7 +6,7 @@ import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // EpochCommits is an autogenerated mock type for the EpochCommits type @@ -14,6 +14,24 @@ type EpochCommits struct { mock.Mock } +// BatchStore provides a mock function with given fields: rw, commit +func (_m *EpochCommits) BatchStore(rw storage.ReaderBatchWriter, commit *flow.EpochCommit) error { + ret := _m.Called(rw, commit) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, *flow.EpochCommit) error); ok { + r0 = rf(rw, commit) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: _a0 func (_m *EpochCommits) ByID(_a0 flow.Identifier) (*flow.EpochCommit, error) { ret := _m.Called(_a0) @@ -44,26 +62,6 @@ func (_m *EpochCommits) ByID(_a0 flow.Identifier) (*flow.EpochCommit, error) { return r0, r1 } -// StoreTx provides a mock function with given fields: commit -func (_m *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error { - ret := _m.Called(commit) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.EpochCommit) func(*transaction.Tx) error); ok { - r0 = rf(commit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - 
- return r0 -} - // NewEpochCommits creates a new instance of EpochCommits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEpochCommits(t interface { diff --git a/storage/mock/epoch_protocol_state_entries.go b/storage/mock/epoch_protocol_state_entries.go index 19569511a35..4f38b4d0a5c 100644 --- a/storage/mock/epoch_protocol_state_entries.go +++ b/storage/mock/epoch_protocol_state_entries.go @@ -6,7 +6,7 @@ import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // EpochProtocolStateEntries is an autogenerated mock type for the EpochProtocolStateEntries type @@ -14,6 +14,42 @@ type EpochProtocolStateEntries struct { mock.Mock } +// BatchIndex provides a mock function with given fields: rw, blockID, epochProtocolStateID +func (_m *EpochProtocolStateEntries) BatchIndex(rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateID flow.Identifier) error { + ret := _m.Called(rw, blockID, epochProtocolStateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(rw, blockID, epochProtocolStateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: w, epochProtocolStateID, epochProtocolStateEntry +func (_m *EpochProtocolStateEntries) BatchStore(w storage.Writer, epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) error { + ret := _m.Called(w, epochProtocolStateID, epochProtocolStateEntry) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.Writer, flow.Identifier, *flow.MinEpochStateEntry) error); ok { + r0 = rf(w, epochProtocolStateID, epochProtocolStateEntry) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: blockID func (_m *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error) { ret := _m.Called(blockID) @@ -74,46 +110,6 @@ func (_m *EpochProtocolStateEntries) ByID(id flow.Identifier) (*flow.RichEpochSt return r0, r1 } -// Index provides a mock function with given fields: blockID, epochProtocolStateID -func (_m *EpochProtocolStateEntries) Index(blockID flow.Identifier, epochProtocolStateID flow.Identifier) func(*transaction.Tx) error { - ret := _m.Called(blockID, epochProtocolStateID) - - if len(ret) == 0 { - panic("no return value specified for Index") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) func(*transaction.Tx) error); ok { - r0 = rf(blockID, epochProtocolStateID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -// StoreTx provides a mock function with given fields: epochProtocolStateID, epochProtocolStateEntry -func (_m *EpochProtocolStateEntries) StoreTx(epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) func(*transaction.Tx) error { - ret := _m.Called(epochProtocolStateID, epochProtocolStateEntry) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 
func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.MinEpochStateEntry) func(*transaction.Tx) error); ok { - r0 = rf(epochProtocolStateID, epochProtocolStateEntry) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewEpochProtocolStateEntries creates a new instance of EpochProtocolStateEntries. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEpochProtocolStateEntries(t interface { diff --git a/storage/mock/epoch_setups.go b/storage/mock/epoch_setups.go index dd917c6711d..2f276060af0 100644 --- a/storage/mock/epoch_setups.go +++ b/storage/mock/epoch_setups.go @@ -6,7 +6,7 @@ import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // EpochSetups is an autogenerated mock type for the EpochSetups type @@ -14,6 +14,24 @@ type EpochSetups struct { mock.Mock } +// BatchStore provides a mock function with given fields: rw, setup +func (_m *EpochSetups) BatchStore(rw storage.ReaderBatchWriter, setup *flow.EpochSetup) error { + ret := _m.Called(rw, setup) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, *flow.EpochSetup) error); ok { + r0 = rf(rw, setup) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: _a0 func (_m *EpochSetups) ByID(_a0 flow.Identifier) (*flow.EpochSetup, error) { ret := _m.Called(_a0) @@ -44,26 +62,6 @@ func (_m *EpochSetups) ByID(_a0 flow.Identifier) (*flow.EpochSetup, error) { return r0, r1 } -// StoreTx provides a mock function with given fields: _a0 -func (_m *EpochSetups) StoreTx(_a0 *flow.EpochSetup) func(*transaction.Tx) error { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.EpochSetup) func(*transaction.Tx) error); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewEpochSetups creates a new instance of EpochSetups. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEpochSetups(t interface { diff --git a/storage/mock/execution_fork_evidence.go b/storage/mock/execution_fork_evidence.go new file mode 100644 index 00000000000..ed3bc7b0ede --- /dev/null +++ b/storage/mock/execution_fork_evidence.go @@ -0,0 +1,75 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// ExecutionForkEvidence is an autogenerated mock type for the ExecutionForkEvidence type +type ExecutionForkEvidence struct { + mock.Mock +} + +// Retrieve provides a mock function with no fields +func (_m *ExecutionForkEvidence) Retrieve() ([]*flow.IncorporatedResultSeal, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 []*flow.IncorporatedResultSeal + var r1 error + if rf, ok := ret.Get(0).(func() ([]*flow.IncorporatedResultSeal, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []*flow.IncorporatedResultSeal); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.IncorporatedResultSeal) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StoreIfNotExists provides a mock function with given fields: conflictingSeals +func (_m *ExecutionForkEvidence) StoreIfNotExists(conflictingSeals []*flow.IncorporatedResultSeal) error { + ret := _m.Called(conflictingSeals) + + if len(ret) == 0 { + panic("no return value specified for StoreIfNotExists") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]*flow.IncorporatedResultSeal) error); ok { + r0 = rf(conflictingSeals) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewExecutionForkEvidence creates a new instance of ExecutionForkEvidence. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionForkEvidence(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionForkEvidence { + mock := &ExecutionForkEvidence{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/execution_results.go b/storage/mock/execution_results.go index 511d6eb3719..8613c47f9cd 100644 --- a/storage/mock/execution_results.go +++ b/storage/mock/execution_results.go @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // ExecutionResults is an autogenerated mock type for the ExecutionResults type @@ -130,26 +128,6 @@ func (_m *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResul return r0, r1 } -// ByIDTx provides a mock function with given fields: resultID -func (_m *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - ret := _m.Called(resultID) - - if len(ret) == 0 { - panic("no return value specified for ByIDTx") - } - - var r0 func(*transaction.Tx) (*flow.ExecutionResult, error) - if rf, ok := ret.Get(0).(func(flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)); ok { - r0 = rf(resultID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) (*flow.ExecutionResult, error)) - } - } - - return r0 -} - // ForceIndex provides a mock function with given fields: blockID, resultID func (_m *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { ret := _m.Called(blockID, resultID) diff --git a/storage/mock/execution_results_reader.go b/storage/mock/execution_results_reader.go index fc370ec82d9..c5e0d1998b9 100644 --- a/storage/mock/execution_results_reader.go +++ 
b/storage/mock/execution_results_reader.go @@ -5,8 +5,6 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // ExecutionResultsReader is an autogenerated mock type for the ExecutionResultsReader type @@ -74,26 +72,6 @@ func (_m *ExecutionResultsReader) ByID(resultID flow.Identifier) (*flow.Executio return r0, r1 } -// ByIDTx provides a mock function with given fields: resultID -func (_m *ExecutionResultsReader) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - ret := _m.Called(resultID) - - if len(ret) == 0 { - panic("no return value specified for ByIDTx") - } - - var r0 func(*transaction.Tx) (*flow.ExecutionResult, error) - if rf, ok := ret.Get(0).(func(flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)); ok { - r0 = rf(resultID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) (*flow.ExecutionResult, error)) - } - } - - return r0 -} - // NewExecutionResultsReader creates a new instance of ExecutionResultsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewExecutionResultsReader(t interface { diff --git a/storage/mock/guarantees.go b/storage/mock/guarantees.go index 48ab17c9956..e6d480c0058 100644 --- a/storage/mock/guarantees.go +++ b/storage/mock/guarantees.go @@ -42,24 +42,6 @@ func (_m *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGu return r0, r1 } -// Store provides a mock function with given fields: guarantee -func (_m *Guarantees) Store(guarantee *flow.CollectionGuarantee) error { - ret := _m.Called(guarantee) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.CollectionGuarantee) error); ok { - r0 = rf(guarantee) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewGuarantees creates a new instance of Guarantees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewGuarantees(t interface { diff --git a/storage/mock/headers.go b/storage/mock/headers.go index abf606ccda7..03648152529 100644 --- a/storage/mock/headers.go +++ b/storage/mock/headers.go @@ -160,24 +160,6 @@ func (_m *Headers) Exists(blockID flow.Identifier) (bool, error) { return r0, r1 } -// Store provides a mock function with given fields: header -func (_m *Headers) Store(header *flow.Header) error { - ret := _m.Called(header) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Header) error); ok { - r0 = rf(header) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewHeaders creates a new instance of Headers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewHeaders(t interface { diff --git a/storage/mock/index.go b/storage/mock/index.go index 72ada45a812..0d2dffd37cd 100644 --- a/storage/mock/index.go +++ b/storage/mock/index.go @@ -42,24 +42,6 @@ func (_m *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { return r0, r1 } -// Store provides a mock function with given fields: blockID, index -func (_m *Index) Store(blockID flow.Identifier, index *flow.Index) error { - ret := _m.Called(blockID, index) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.Index) error); ok { - r0 = rf(blockID, index) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewIndex creates a new instance of Index. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewIndex(t interface { diff --git a/storage/mock/latest_persisted_sealed_result.go b/storage/mock/latest_persisted_sealed_result.go new file mode 100644 index 00000000000..c8e72739aa3 --- /dev/null +++ b/storage/mock/latest_persisted_sealed_result.go @@ -0,0 +1,77 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// LatestPersistedSealedResult is an autogenerated mock type for the LatestPersistedSealedResult type +type LatestPersistedSealedResult struct { + mock.Mock +} + +// BatchSet provides a mock function with given fields: resultID, height, batch +func (_m *LatestPersistedSealedResult) BatchSet(resultID flow.Identifier, height uint64, batch storage.ReaderBatchWriter) error { + ret := _m.Called(resultID, height, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchSet") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, storage.ReaderBatchWriter) error); ok { + r0 = rf(resultID, height, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Latest provides a mock function with no fields +func (_m *LatestPersistedSealedResult) Latest() (flow.Identifier, uint64) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Latest") + } + + var r0 flow.Identifier + var r1 uint64 + if rf, ok := ret.Get(0).(func() (flow.Identifier, uint64)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func() uint64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(uint64) + } + + return r0, r1 +} + +// NewLatestPersistedSealedResult creates a new instance of LatestPersistedSealedResult. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLatestPersistedSealedResult(t interface { + mock.TestingT + Cleanup(func()) +}) *LatestPersistedSealedResult { + mock := &LatestPersistedSealedResult{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/lock_manager.go b/storage/mock/lock_manager.go new file mode 100644 index 00000000000..4a70fdce0ed --- /dev/null +++ b/storage/mock/lock_manager.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. 
+ +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + mock "github.com/stretchr/testify/mock" +) + +// LockManager is an autogenerated mock type for the LockManager type +type LockManager struct { + mock.Mock +} + +// NewContext provides a mock function with no fields +func (_m *LockManager) NewContext() lockctx.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NewContext") + } + + var r0 lockctx.Context + if rf, ok := ret.Get(0).(func() lockctx.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(lockctx.Context) + } + } + + return r0 +} + +// NewLockManager creates a new instance of LockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLockManager(t interface { + mock.TestingT + Cleanup(func()) +}) *LockManager { + mock := &LockManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/my_execution_receipts.go b/storage/mock/my_execution_receipts.go index 7d15f60146c..457690019ae 100644 --- a/storage/mock/my_execution_receipts.go +++ b/storage/mock/my_execution_receipts.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -32,17 +34,17 @@ func (_m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier return r0 } -// BatchStoreMyReceipt provides a mock function with given fields: receipt, batch -func (_m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error { - ret := _m.Called(receipt, batch) +// BatchStoreMyReceipt provides a mock function with given fields: lctx, receipt, batch +func (_m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, receipt, batch) if len(ret) == 0 { panic("no return value specified for BatchStoreMyReceipt") } var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt, storage.ReaderBatchWriter) error); ok { - r0 = rf(receipt, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.ExecutionReceipt, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, receipt, batch) } else { r0 = ret.Error(0) } diff --git a/storage/mock/payloads.go b/storage/mock/payloads.go index 69f3597ada5..b0b7b936616 100644 --- a/storage/mock/payloads.go +++ b/storage/mock/payloads.go @@ -42,24 +42,6 @@ func (_m *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { return r0, r1 } -// Store provides a mock function with given fields: blockID, payload -func (_m *Payloads) Store(blockID flow.Identifier, payload *flow.Payload) error { - ret := _m.Called(blockID, payload) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.Payload) error); ok { - r0 = rf(blockID, payload) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // NewPayloads creates a new instance of Payloads. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
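The new `LockManager` mock above only exposes `NewContext`. A small sketch of stubbing it in a test (illustrative; a real test would return a `lockctx.Context` obtained from a production lock manager rather than nil):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestStubbedLockManager(t *testing.T) {
	lockManager := storagemock.NewLockManager(t)

	// Returning nil keeps the sketch self-contained; code under test would normally
	// acquire locks through the returned lockctx.Context and pass it along as a Proof.
	lockManager.On("NewContext").Return(nil)

	require.Nil(t, lockManager.NewContext())
}
```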
func NewPayloads(t interface { diff --git a/storage/mock/protocol_kv_store.go b/storage/mock/protocol_kv_store.go index bc7ae279994..59e5f97d779 100644 --- a/storage/mock/protocol_kv_store.go +++ b/storage/mock/protocol_kv_store.go @@ -3,10 +3,12 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // ProtocolKVStore is an autogenerated mock type for the ProtocolKVStore type @@ -14,6 +16,42 @@ type ProtocolKVStore struct { mock.Mock } +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, stateID +func (_m *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, stateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: lctx, rw, stateID, data +func (_m *ProtocolKVStore) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error { + ret := _m.Called(lctx, rw, stateID, data) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, *flow.PSKeyValueStoreData) error); ok { + r0 = rf(lctx, rw, stateID, data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: blockID func (_m *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) { ret := _m.Called(blockID) @@ -74,46 +112,6 @@ func (_m *ProtocolKVStore) ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, return r0, r1 } -// IndexTx provides a mock function with given fields: blockID, stateID -func (_m *ProtocolKVStore) IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error { - ret := _m.Called(blockID, stateID) - - if len(ret) == 0 { - panic("no return value specified for IndexTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) func(*transaction.Tx) error); ok { - r0 = rf(blockID, stateID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -// StoreTx provides a mock function with given fields: stateID, data -func (_m *ProtocolKVStore) StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error { - ret := _m.Called(stateID, data) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.PSKeyValueStoreData) func(*transaction.Tx) error); ok { - r0 = rf(stateID, data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewProtocolKVStore creates a new instance of ProtocolKVStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewProtocolKVStore(t interface { diff --git a/storage/mock/quorum_certificates.go b/storage/mock/quorum_certificates.go index e7960a30fa1..6fbc6592631 100644 --- a/storage/mock/quorum_certificates.go +++ b/storage/mock/quorum_certificates.go @@ -3,10 +3,12 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // QuorumCertificates is an autogenerated mock type for the QuorumCertificates type @@ -14,6 +16,24 @@ type QuorumCertificates struct { mock.Mock } +// BatchStore provides a mock function with given fields: _a0, _a1, _a2 +func (_m *QuorumCertificates) BatchStore(_a0 lockctx.Proof, _a1 storage.ReaderBatchWriter, _a2 *flow.QuorumCertificate) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, *flow.QuorumCertificate) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: blockID func (_m *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) { ret := _m.Called(blockID) @@ -44,26 +64,6 @@ func (_m *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCe return r0, r1 } -// StoreTx provides a mock function with given fields: qc -func (_m *QuorumCertificates) StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error { - ret := _m.Called(qc) - - if len(ret) == 0 { - panic("no return value specified for StoreTx") - } - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.QuorumCertificate) func(*transaction.Tx) error); ok { - r0 = rf(qc) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - // NewQuorumCertificates creates a new instance of QuorumCertificates. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewQuorumCertificates(t interface { diff --git a/storage/mock/reader_batch_writer.go b/storage/mock/reader_batch_writer.go index eaf48226e84..b973cef56dd 100644 --- a/storage/mock/reader_batch_writer.go +++ b/storage/mock/reader_batch_writer.go @@ -5,8 +5,6 @@ package mock import ( storage "github.com/onflow/flow-go/storage" mock "github.com/stretchr/testify/mock" - - sync "sync" ) // ReaderBatchWriter is an autogenerated mock type for the ReaderBatchWriter type @@ -39,11 +37,6 @@ func (_m *ReaderBatchWriter) GlobalReader() storage.Reader { return r0 } -// Lock provides a mock function with given fields: _a0 -func (_m *ReaderBatchWriter) Lock(_a0 *sync.Mutex) { - _m.Called(_a0) -} - // Writer provides a mock function with no fields func (_m *ReaderBatchWriter) Writer() storage.Writer { ret := _m.Called() diff --git a/storage/mock/result_approvals.go b/storage/mock/result_approvals.go index c961f39b64d..c103768799b 100644 --- a/storage/mock/result_approvals.go +++ b/storage/mock/result_approvals.go @@ -3,7 +3,9 @@ package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" ) @@ -72,37 +74,21 @@ func (_m *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApprova return r0, r1 } -// Index provides a mock function with given fields: resultID, chunkIndex, approvalID -func (_m *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - ret := _m.Called(resultID, chunkIndex, approvalID) +// StoreMyApproval provides a mock function with given fields: approval +func (_m *ResultApprovals) StoreMyApproval(approval *flow.ResultApproval) func(lockctx.Proof) error { + ret := _m.Called(approval) if len(ret) == 0 { - panic("no return value specified for Index") + panic("no return value specified for StoreMyApproval") } - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, flow.Identifier) error); ok { - r0 = rf(resultID, chunkIndex, approvalID) + var r0 func(lockctx.Proof) error + if rf, ok := ret.Get(0).(func(*flow.ResultApproval) func(lockctx.Proof) error); ok { + r0 = rf(approval) } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Store provides a mock function with given fields: result -func (_m *ResultApprovals) Store(result *flow.ResultApproval) error { - ret := _m.Called(result) - - if len(ret) == 0 { - panic("no return value specified for Store") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.ResultApproval) error); ok { - r0 = rf(result) - } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(func(lockctx.Proof) error) + } } return r0 diff --git a/storage/mock/transaction_result_error_messages.go b/storage/mock/transaction_result_error_messages.go index 9c5a586d5a3..414d3cf57ad 100644 --- a/storage/mock/transaction_result_error_messages.go +++ b/storage/mock/transaction_result_error_messages.go @@ -5,6 +5,8 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" ) // TransactionResultErrorMessages is an autogenerated mock type for the TransactionResultErrorMessages type @@ -12,6 +14,24 @@ type TransactionResultErrorMessages struct { mock.Mock } +// BatchStore provides a mock function with given fields: blockID, transactionResultErrorMessages, batch +func (_m *TransactionResultErrorMessages) BatchStore(blockID flow.Identifier, transactionResultErrorMessages 
[]flow.TransactionResultErrorMessage, batch storage.ReaderBatchWriter) error { + ret := _m.Called(blockID, transactionResultErrorMessages, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResultErrorMessage, storage.ReaderBatchWriter) error); ok { + r0 = rf(blockID, transactionResultErrorMessages, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: id func (_m *TransactionResultErrorMessages) ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) { ret := _m.Called(id) diff --git a/storage/mock/transactions.go b/storage/mock/transactions.go index 0a333dbfd4a..7bb935462e3 100644 --- a/storage/mock/transactions.go +++ b/storage/mock/transactions.go @@ -5,6 +5,8 @@ package mock import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" ) // Transactions is an autogenerated mock type for the Transactions type @@ -12,6 +14,24 @@ type Transactions struct { mock.Mock } +// BatchStore provides a mock function with given fields: tx, batch +func (_m *Transactions) BatchStore(tx *flow.TransactionBody, batch storage.ReaderBatchWriter) error { + ret := _m.Called(tx, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*flow.TransactionBody, storage.ReaderBatchWriter) error); ok { + r0 = rf(tx, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: txID func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) { ret := _m.Called(txID) diff --git a/storage/mocks/storage.go b/storage/mocks/storage.go index 2dba0002216..3bbe715f4ea 100644 --- a/storage/mocks/storage.go +++ b/storage/mocks/storage.go @@ -8,9 +8,9 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" storage "github.com/onflow/flow-go/storage" - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // MockBlocks is a mock of Blocks interface. @@ -36,6 +36,20 @@ func (m *MockBlocks) EXPECT() *MockBlocksMockRecorder { return m.recorder } +// BatchStore mocks base method. +func (m *MockBlocks) BatchStore(arg0 lockctx.Proof, arg1 storage.ReaderBatchWriter, arg2 *flow.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchStore indicates an expected call of BatchStore. +func (mr *MockBlocksMockRecorder) BatchStore(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockBlocks)(nil).BatchStore), arg0, arg1, arg2) +} + // ByCollectionID mocks base method. func (m *MockBlocks) ByCollectionID(arg0 flow.Identifier) (*flow.Block, error) { m.ctrl.T.Helper() @@ -95,34 +109,6 @@ func (mr *MockBlocksMockRecorder) IndexBlockForCollections(arg0, arg1 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexBlockForCollections", reflect.TypeOf((*MockBlocks)(nil).IndexBlockForCollections), arg0, arg1) } -// Store mocks base method. 
-func (m *MockBlocks) Store(arg0 *flow.Block) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockBlocksMockRecorder) Store(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockBlocks)(nil).Store), arg0) -} - -// StoreTx mocks base method. -func (m *MockBlocks) StoreTx(arg0 *flow.Block) func(*transaction.Tx) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreTx", arg0) - ret0, _ := ret[0].(func(*transaction.Tx) error) - return ret0 -} - -// StoreTx indicates an expected call of StoreTx. -func (mr *MockBlocksMockRecorder) StoreTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreTx", reflect.TypeOf((*MockBlocks)(nil).StoreTx), arg0) -} - // MockHeaders is a mock of Headers interface. type MockHeaders struct { ctrl *gomock.Controller @@ -221,20 +207,6 @@ func (mr *MockHeadersMockRecorder) Exists(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockHeaders)(nil).Exists), arg0) } -// Store mocks base method. -func (m *MockHeaders) Store(arg0 *flow.Header) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockHeadersMockRecorder) Store(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockHeaders)(nil).Store), arg0) -} - // MockPayloads is a mock of Payloads interface. type MockPayloads struct { ctrl *gomock.Controller @@ -273,20 +245,6 @@ func (mr *MockPayloadsMockRecorder) ByBlockID(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByBlockID", reflect.TypeOf((*MockPayloads)(nil).ByBlockID), arg0) } -// Store mocks base method. -func (m *MockPayloads) Store(arg0 flow.Identifier, arg1 *flow.Payload) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockPayloadsMockRecorder) Store(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockPayloads)(nil).Store), arg0, arg1) -} - // MockCollections is a mock of Collections interface. type MockCollections struct { ctrl *gomock.Controller @@ -310,6 +268,21 @@ func (m *MockCollections) EXPECT() *MockCollectionsMockRecorder { return m.recorder } +// BatchStoreAndIndexByTransaction mocks base method. +func (m *MockCollections) BatchStoreAndIndexByTransaction(arg0 lockctx.Proof, arg1 *flow.Collection, arg2 storage.ReaderBatchWriter) (flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchStoreAndIndexByTransaction", arg0, arg1, arg2) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchStoreAndIndexByTransaction indicates an expected call of BatchStoreAndIndexByTransaction. 
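// The regenerated MockCollections above now takes a lockctx.Proof plus a batch writer and
// returns (flow.LightCollection, error). A hypothetical test sketch, not part of this change:
// NewMockCollections is the standard gomock-generated constructor, and the matcher values
// and function name below are illustrative only.
func exampleStubBatchStoreAndIndexByTransaction(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	collections := mocks.NewMockCollections(ctrl)
	// accept any lock proof, collection, and batch writer; return an empty light collection
	collections.EXPECT().
		BatchStoreAndIndexByTransaction(gomock.Any(), gomock.Any(), gomock.Any()).
		Return(flow.LightCollection{}, nil)
}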
+func (mr *MockCollectionsMockRecorder) BatchStoreAndIndexByTransaction(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStoreAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).BatchStoreAndIndexByTransaction), arg0, arg1, arg2) +} + // ByID mocks base method. func (m *MockCollections) ByID(arg0 flow.Identifier) (*flow.Collection, error) { m.ctrl.T.Helper() @@ -370,11 +343,12 @@ func (mr *MockCollectionsMockRecorder) Remove(arg0 interface{}) *gomock.Call { } // Store mocks base method. -func (m *MockCollections) Store(arg0 *flow.Collection) error { +func (m *MockCollections) Store(arg0 *flow.Collection) (flow.LightCollection, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Store indicates an expected call of Store. @@ -383,18 +357,19 @@ func (mr *MockCollectionsMockRecorder) Store(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockCollections)(nil).Store), arg0) } -// StoreLightAndIndexByTransaction mocks base method. -func (m *MockCollections) StoreLightAndIndexByTransaction(arg0 *flow.LightCollection) error { +// StoreAndIndexByTransaction mocks base method. +func (m *MockCollections) StoreAndIndexByTransaction(arg0 lockctx.Proof, arg1 *flow.Collection) (flow.LightCollection, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreLightAndIndexByTransaction", arg0) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "StoreAndIndexByTransaction", arg0, arg1) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// StoreLightAndIndexByTransaction indicates an expected call of StoreLightAndIndexByTransaction. -func (mr *MockCollectionsMockRecorder) StoreLightAndIndexByTransaction(arg0 interface{}) *gomock.Call { +// StoreAndIndexByTransaction indicates an expected call of StoreAndIndexByTransaction. +func (mr *MockCollectionsMockRecorder) StoreAndIndexByTransaction(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreLightAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).StoreLightAndIndexByTransaction), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).StoreAndIndexByTransaction), arg0, arg1) } // MockCommits is a mock of Commits interface. @@ -435,17 +410,17 @@ func (mr *MockCommitsMockRecorder) BatchRemoveByBlockID(arg0, arg1 interface{}) } // BatchStore mocks base method. -func (m *MockCommits) BatchStore(arg0 flow.Identifier, arg1 flow.StateCommitment, arg2 storage.ReaderBatchWriter) error { +func (m *MockCommits) BatchStore(arg0 lockctx.Proof, arg1 flow.Identifier, arg2 flow.StateCommitment, arg3 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // BatchStore indicates an expected call of BatchStore. 
-func (mr *MockCommitsMockRecorder) BatchStore(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockCommitsMockRecorder) BatchStore(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockCommits)(nil).BatchStore), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockCommits)(nil).BatchStore), arg0, arg1, arg2, arg3) } // ByBlockID mocks base method. @@ -463,20 +438,6 @@ func (mr *MockCommitsMockRecorder) ByBlockID(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByBlockID", reflect.TypeOf((*MockCommits)(nil).ByBlockID), arg0) } -// Store mocks base method. -func (m *MockCommits) Store(arg0 flow.Identifier, arg1 flow.StateCommitment) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockCommitsMockRecorder) Store(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockCommits)(nil).Store), arg0, arg1) -} - // MockEvents is a mock of Events interface. type MockEvents struct { ctrl *gomock.Controller diff --git a/storage/operation/approvals.go b/storage/operation/approvals.go index df38135677b..08903484a34 100644 --- a/storage/operation/approvals.go +++ b/storage/operation/approvals.go @@ -1,28 +1,29 @@ package operation import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) -// InsertResultApproval inserts a ResultApproval by ID. -// The same key (`approval.ID()`) necessitates that the value (full `approval`) is -// also identical (otherwise, we would have a successful pre-image attack on our -// cryptographic hash function). Therefore, concurrent calls to this function are safe. -func InsertResultApproval(w storage.Writer, approval *flow.ResultApproval) error { - return UpsertByKey(w, MakePrefix(codeResultApproval, approval.ID()), approval) -} - // RetrieveResultApproval retrieves an approval by ID. // Returns `storage.ErrNotFound` if no Approval with the given ID has been stored. func RetrieveResultApproval(r storage.Reader, approvalID flow.Identifier, approval *flow.ResultApproval) error { return RetrieveByKey(r, MakePrefix(codeResultApproval, approvalID), approval) } -// UnsafeIndexResultApproval inserts a ResultApproval ID keyed by ExecutionResult ID -// and chunk index. -// Unsafe means that it does not check if a different approval is indexed for the same -// chunk, and will overwrite the existing index. +// InsertAndIndexResultApproval atomically performs the following storage operations: +// 1. Store ResultApproval by its ID (in this step, accidental overwrites with inconsistent values +// are prevented by using a collision-resistant hash to derive the key from the value) +// 2. Index approval by the executed chunk, specifically the key pair (ExecutionResultID, chunk index). +// - first, we ensure that no _different_ approval has already been indexed for the same key pair +// - only if the prior check succeeds, we write the index to the database +// // CAUTION: // - In general, the Flow protocol requires multiple approvals for the same chunk from different // verification nodes. 
In other words, there are multiple different approvals for the same chunk. @@ -30,10 +31,51 @@ func RetrieveResultApproval(r storage.Reader, approvalID flow.Identifier, approv // Verification Nodes for tracking their own approvals (for the same ExecutionResult, a Verifier // will always produce the same approval) // - In order to make sure only one approval is indexed for the chunk, _all calls_ to -// `UnsafeIndexResultApproval` must be synchronized by the higher-logic. Currently, we have the -// convention that `store.ResultApprovals` is the only place that is allowed to call this method. -func UnsafeIndexResultApproval(w storage.Writer, resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - return UpsertByKey(w, MakePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) +// `InsertAndIndexResultApproval` must be synchronized by the higher-logic. Currently, we have the +// lockctx.Proof to prove the higher logic is holding the lock inserting the approval after checking +// that the approval is not already indexed. +// +// Expected error returns: +// - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed +func InsertAndIndexResultApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error { + approvalID := approval.ID() + resultID := approval.Body.ExecutionResultID + chunkIndex := approval.Body.ChunkIndex + + // the following functors allow encoding to be done before acquiring the lock + inserting := Upserting(MakePrefix(codeResultApproval, approvalID), approval) + indexing := Upserting(MakePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) + + return func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error { + if !lctx.HoldsLock(storage.LockIndexResultApproval) { + return fmt.Errorf("missing lock for index result approval for result: %v", resultID) + } + + var storedApprovalID flow.Identifier + err := LookupResultApproval(rw.GlobalReader(), resultID, chunkIndex, &storedApprovalID) + if err == nil { + if storedApprovalID != approvalID { + return fmt.Errorf("attempting to store conflicting approval (result: %v, chunk index: %d): storing: %v, stored: %v. %w", + resultID, chunkIndex, approvalID, storedApprovalID, storage.ErrDataMismatch) + } + return nil // already stored and indexed + } + if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as this indicates that no receipt is indexed yet; anything else is an exception + return fmt.Errorf("could not lookup result approval ID: %w", irrecoverable.NewException(err)) + } + + err = inserting(rw.Writer()) + if err != nil { + return fmt.Errorf("could not store result approval: %w", err) + } + + err = indexing(rw.Writer()) + if err != nil { + return fmt.Errorf("could not index result approval: %w", err) + } + + return nil + } } // LookupResultApproval finds a ResultApproval by result ID and chunk index. diff --git a/storage/operation/badgerimpl/dbstore.go b/storage/operation/badgerimpl/dbstore.go index 12d2f0fb018..6f1376596b4 100644 --- a/storage/operation/badgerimpl/dbstore.go +++ b/storage/operation/badgerimpl/dbstore.go @@ -27,3 +27,8 @@ func (b *dbStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) error func (b *dbStore) NewBatch() storage.Batch { return NewReaderBatchWriter(b.db) } + +// No errors are expected during normal operation. 
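// The deferred-write pattern introduced in storage/operation/approvals.go above separates
// encoding (done before any lock is taken) from the locked, batched write. A hypothetical
// caller sketch, not part of this change: the enclosing function and variable names are
// illustrative, and it assumes the caller owns a lockctx.Context and a storage.DB.
func storeMyApprovalExample(db storage.DB, lctx lockctx.Context, approval *flow.ResultApproval) error {
	storing := operation.InsertAndIndexResultApproval(approval) // encoding happens outside the lock

	if err := lctx.AcquireLock(storage.LockIndexResultApproval); err != nil {
		return err
	}
	defer lctx.Release()

	// the returned functor checks the lock proof and deduplicates against a previously indexed approval
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return storing(lctx, rw)
	})
}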
+func (b *dbStore) Close() error { + return b.db.Close() +} diff --git a/storage/operation/badgerimpl/writer.go b/storage/operation/badgerimpl/writer.go index 148b3254c2d..e22635fb670 100644 --- a/storage/operation/badgerimpl/writer.go +++ b/storage/operation/badgerimpl/writer.go @@ -3,7 +3,6 @@ package badgerimpl import ( "fmt" "slices" - "sync" "github.com/dgraph-io/badger/v2" @@ -22,9 +21,6 @@ type ReaderBatchWriter struct { // for executing callbacks after the batch has been flushed, such as updating caches callbacks *operation.Callbacks - - // for repreventing re-entrant deadlock - locks *operation.BatchLocks } var _ storage.ReaderBatchWriter = (*ReaderBatchWriter)(nil) @@ -51,14 +47,6 @@ func (b *ReaderBatchWriter) BadgerWriteBatch() *badger.WriteBatch { return b.batch } -// Lock tries to acquire the lock for the batch. -// if the lock is already acquired by this same batch from other pending db operations, -// then it will not be blocked and can continue updating the batch, which prevents a re-entrant deadlock. -// CAUTION: The caller must ensure that no other references exist for the input lock. -func (b *ReaderBatchWriter) Lock(lock *sync.Mutex) { - b.locks.Lock(lock, b.callbacks) -} - // AddCallback adds a callback to execute after the batch has been flush // regardless the batch update is succeeded or failed. // The error parameter is the error returned by the batch update. @@ -79,6 +67,7 @@ func (b *ReaderBatchWriter) Commit() error { // Close releases memory of the batch and no error is returned. // This can be called as a defer statement immediately after creating Batch // to reduce risk of unbounded memory consumption. +// No errors are expected during normal operation. func (b *ReaderBatchWriter) Close() error { // BadgerDB v2 docs for WriteBatch.Cancel(): // @@ -117,7 +106,6 @@ func NewReaderBatchWriter(db *badger.DB) *ReaderBatchWriter { globalReader: ToReader(db), batch: db.NewWriteBatch(), callbacks: operation.NewCallbacks(), - locks: operation.NewBatchLocks(), } } @@ -182,13 +170,13 @@ func (b *ReaderBatchWriter) Delete(key []byte) error { // It returns error if endPrefix < startPrefix // no other errors are expected during normal operation func (b *ReaderBatchWriter) DeleteByRange(globalReader storage.Reader, startPrefix, endPrefix []byte) error { - err := operation.Iterate(startPrefix, endPrefix, func(key []byte) error { + err := operation.IterateKeysByPrefixRange(globalReader, startPrefix, endPrefix, func(key []byte) error { err := b.batch.Delete(key) if err != nil { return fmt.Errorf("could not add key to delete batch (%v): %w", key, err) } return nil - })(globalReader) + }) if err != nil { return fmt.Errorf("could not find keys by range to be deleted: %w", err) diff --git a/storage/operation/batch_locks.go b/storage/operation/batch_locks.go deleted file mode 100644 index 969db20f82a..00000000000 --- a/storage/operation/batch_locks.go +++ /dev/null @@ -1,71 +0,0 @@ -package operation - -import "sync" - -// BatchLocks is a struct that holds the locks acquired by a batch, -// which is used to prevent re-entrant deadlock. -// BatchLocks is not safe for concurrent use by multiple goroutines. -// Deprecated: BatchLocks exists to provide deadlock protection as a temporary measure during -// the course of development of the Pebble database layer -- to be replaced prior to release with -// a system without reliance on globally unique mutex references. -type BatchLocks struct { - // CAUTION: this map is keyed by the pointer address of the mutex. 
Users must ensure - // that only one reference exists to the relevant lock. - acquiredLocks map[*sync.Mutex]struct{} -} - -func NewBatchLocks() *BatchLocks { - return &BatchLocks{ - acquiredLocks: nil, // lazy initialization - } -} - -// Lock tries to acquire a given lock on behalf of the batch. -// -// If the batch has already acquired this lock earlier (recorded in acquiredLocks), -// it skips locking again to avoid unnecessary blocking, allowing the caller to proceed immediately. -// -// If the lock has not been acquired yet, it blocks until the lock is acquired, -// and then records the lock in the acquiredLocks map to indicate ownership. -// -// It also registers a callback to ensure that when the batch operation is finished, -// the lock is properly released and removed from acquiredLocks. -// -// Parameters: -// - lock: The *sync.Mutex to acquire. The common usage of this lock is to prevent -// dirty reads so that the batch writes is writing the correct data. -// In other words, this Lock method is to prevent re-entrant deadlock, while this lock -// mutex is used to prevent dirty reads. -// - callback: A Callbacks collection to which the unlock operation is appended -// so that locks are safely released once the batch processing is complete. -// -// CAUTION: Since locks are identified by pointer address, callers must ensure that no other references exist for the input lock. -func (l *BatchLocks) Lock(lock *sync.Mutex, callback *Callbacks) { - // if the lock is already acquired by this same batch from previous db operations, - // then it will not be blocked and can continue updating the batch, - if l.acquiredLocks != nil { - if _, ok := l.acquiredLocks[lock]; ok { - // the batch is already holding the lock - // so we can just return and the caller is unblock to continue - return - } - } - - // batch never hold this lock before, then trying to acquire it - // this will block until the lock is acquired - lock.Lock() - - if l.acquiredLocks == nil { - l.acquiredLocks = make(map[*sync.Mutex]struct{}) - } - - // once we acquire the lock, we need to add it to the acquired locks so that - // other operations from this batch can be unblocked - l.acquiredLocks[lock] = struct{}{} - - // we need to make sure that the lock is released when the batch is done - callback.AddCallback(func(error) { - delete(l.acquiredLocks, lock) - lock.Unlock() - }) -} diff --git a/storage/operation/batch_locks_test.go b/storage/operation/batch_locks_test.go deleted file mode 100644 index 4fdf90f9b1d..00000000000 --- a/storage/operation/batch_locks_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package operation_test - -import ( - "errors" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/operation" - "github.com/onflow/flow-go/storage/operation/dbtest" -) - -func InsertNewEntity(lock *sync.Mutex, rw storage.ReaderBatchWriter, e Entity) error { - rw.Lock(lock) - - var item Entity - err := operation.RetrieveByKey(rw.GlobalReader(), e.Key(), &item) - if err == nil { - return storage.ErrAlreadyExists - } - - if !errors.Is(err, storage.ErrNotFound) { - return err - } - - return operation.UpsertByKey(rw.Writer(), e.Key(), e) -} - -func TestLockReEntrance(t *testing.T) { - t.Parallel() - - dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - inserting := sync.Mutex{} - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := InsertNewEntity(&inserting, rw, Entity{ID: 1}) - 
if err != nil { - return err - } - // Re-entrant call to InsertNewEntity - err = InsertNewEntity(&inserting, rw, Entity{ID: 2}) - if err != nil { - return err - } - - return nil - })) - - var item Entity - require.NoError(t, operation.RetrieveByKey(db.Reader(), Entity{ID: 1}.Key(), &item)) - require.Equal(t, Entity{ID: 1}, item) - - require.NoError(t, operation.RetrieveByKey(db.Reader(), Entity{ID: 2}.Key(), &item)) - require.Equal(t, Entity{ID: 2}, item) - }) -} - -func TestLockSeqential(t *testing.T) { - t.Parallel() - - dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - inserting := sync.Mutex{} - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return InsertNewEntity(&inserting, rw, Entity{ID: 1}) - })) - - require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return InsertNewEntity(&inserting, rw, Entity{ID: 2}) - })) - - var item Entity - require.NoError(t, operation.RetrieveByKey(db.Reader(), Entity{ID: 1}.Key(), &item)) - require.Equal(t, Entity{ID: 1}, item) - - require.NoError(t, operation.RetrieveByKey(db.Reader(), Entity{ID: 2}.Key(), &item)) - require.Equal(t, Entity{ID: 2}, item) - }) -} - -func TestLockConcurrentInsert(t *testing.T) { - t.Parallel() - - dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - var ( - wg sync.WaitGroup - lock sync.Mutex - count = 10 // number of concurrent inserts - ) - - entities := make([]Entity, count) - for i := 0; i < count; i++ { - entities[i] = Entity{ID: uint64(i)} - } - - wg.Add(count) - for i := 0; i < count; i++ { - i := i // capture loop variable - go func() { - defer wg.Done() - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return InsertNewEntity(&lock, rw, entities[i]) - }) - require.NoError(t, err) - }() - } - - wg.Wait() - - // Verify all entities were inserted correctly - for i := 0; i < count; i++ { - var result Entity - err := operation.RetrieveByKey(db.Reader(), entities[i].Key(), &result) - require.NoError(t, err) - require.Equal(t, entities[i], result) - } - }) -} - -// concurrently inserting the same entity 10 times, should only succeed 1 time, -// and fail 9 times -func TestLockConcurrentInsertError(t *testing.T) { - t.Parallel() - - dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - var ( - wg sync.WaitGroup - lock sync.Mutex - count = 10 // number of concurrent inserts - ) - - entity := Entity{ID: uint64(1)} - failedCount := atomic.NewInt32(0) - - wg.Add(count) - for i := 0; i < count; i++ { - go func() { - defer wg.Done() - err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return InsertNewEntity(&lock, rw, entity) - }) - - if err != nil { - failedCount.Add(1) - } - }() - } - - wg.Wait() - - // Verify the entity was inserted correctly - var result Entity - err := operation.RetrieveByKey(db.Reader(), entity.Key(), &result) - require.NoError(t, err) - require.Equal(t, entity, result) - - // and failed 9 times - require.Equal(t, int32(9), failedCount.Load(), "expected 9 failed inserts") - }) -} diff --git a/storage/operation/children.go b/storage/operation/children.go new file mode 100644 index 00000000000..eccc2e06f19 --- /dev/null +++ b/storage/operation/children.go @@ -0,0 +1,42 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// UpsertBlockChildren updates the children of the specified parent block ID. 
+// +// CAUTION: +// - The caller must acquire either the lock [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock] (but not both) +// and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func UpsertBlockChildren(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, childrenIDs flow.IdentifierList) error { + held := lctx.HoldsLock(storage.LockInsertBlock) || lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) + if !held { + return fmt.Errorf("missing required lock: %s or %s", storage.LockInsertBlock, storage.LockInsertOrFinalizeClusterBlock) + } + + return UpsertByKey(w, MakePrefix(codeBlockChildren, blockID), childrenIDs) +} + +// RetrieveBlockChildren retrieves the list of child block IDs for the specified parent block. +// For every known block (at or above the root block height), this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func RetrieveBlockChildren(r storage.Reader, blockID flow.Identifier, childrenIDs *flow.IdentifierList) error { + return RetrieveByKey(r, MakePrefix(codeBlockChildren, blockID), childrenIDs) +} diff --git a/storage/operation/children_test.go b/storage/operation/children_test.go new file mode 100644 index 00000000000..233cacbb256 --- /dev/null +++ b/storage/operation/children_test.go @@ -0,0 +1,48 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestBlockChildrenIndexUpdateLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + childrenIDs := unittest.IdentifierListFixture(8) + var retrievedIDs flow.IdentifierList + + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertBlockChildren(lctx, rw.Writer(), blockID, childrenIDs) + }) + require.NoError(t, err) + lctx.Release() + err = operation.RetrieveBlockChildren(db.Reader(), blockID, &retrievedIDs) + require.NoError(t, err) + assert.Equal(t, childrenIDs, retrievedIDs) + + altIDs := unittest.IdentifierListFixture(4) + lctx = lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertBlockChildren(lctx, rw.Writer(), 
blockID, altIDs) + }) + require.NoError(t, err) + lctx.Release() + err = operation.RetrieveBlockChildren(db.Reader(), blockID, &retrievedIDs) + require.NoError(t, err) + assert.Equal(t, altIDs, retrievedIDs) + }) +} diff --git a/storage/operation/cluster.go b/storage/operation/cluster.go new file mode 100644 index 00000000000..a6469635b29 --- /dev/null +++ b/storage/operation/cluster.go @@ -0,0 +1,110 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// This file implements storage functions for chain state book-keeping of +// collection node cluster consensus. In contrast to the corresponding functions +// for regular consensus, these functions include the cluster ID in order to +// support storing multiple chains, for example during epoch switchover. + +// IndexClusterBlockHeight indexes a cluster block from the specified cluster by its height. +func IndexClusterBlockHeight(lctx lockctx.Proof, w storage.Writer, clusterID flow.ChainID, height uint64, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + + return UpsertByKey(w, MakePrefix(codeFinalizedCluster, clusterID, height), blockID) +} + +// LookupClusterBlockHeight retrieves a block ID by height for the given cluster +// (only finalized cluster blocks are indexed by height to guarantee uniqueness). +func LookupClusterBlockHeight(r storage.Reader, clusterID flow.ChainID, height uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeFinalizedCluster, clusterID, height), blockID) +} + +// UpsertClusterFinalizedHeight updates (overwrites!) the latest finalized cluster block height for the given cluster. +func UpsertClusterFinalizedHeight(lctx lockctx.Proof, w storage.Writer, clusterID flow.ChainID, number uint64) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + return UpsertByKey(w, MakePrefix(codeClusterHeight, clusterID), number) +} + +// RetrieveClusterFinalizedHeight retrieves the latest finalized cluster block height of the given cluster. +func RetrieveClusterFinalizedHeight(r storage.Reader, clusterID flow.ChainID, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeClusterHeight, clusterID), height) +} + +// IndexReferenceBlockByClusterBlock updates the reference block ID for the given +// cluster block ID. While each cluster block specifies a reference block in its +// payload, we maintain this additional lookup for performance reasons. +func IndexReferenceBlockByClusterBlock(lctx lockctx.Proof, w storage.Writer, clusterBlockID, refID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + + // Only need to check if the lock is held, no need to check if is already stored, + // because the duplication check is done when storing a header, which is in the same + // batch update and holding the same lock. + + return UpsertByKey(w, MakePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID) +} + +// LookupReferenceBlockByClusterBlock looks up the reference block ID for the given +// cluster block ID. 
While each cluster block specifies a reference block in its
+// payload, we maintain this additional lookup for performance reasons.
+func LookupReferenceBlockByClusterBlock(r storage.Reader, clusterBlockID flow.Identifier, refID *flow.Identifier) error {
+	return RetrieveByKey(r, MakePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID)
+}
+
+// IndexClusterBlockByReferenceHeight indexes a cluster block ID by its reference
+// block height. The cluster block ID is included in the key for more efficient
+// traversal. Only finalized cluster blocks should be included in this index.
+// The key looks like: codeRefHeightToClusterBlock prefix, followed by the reference height and the cluster block ID.
+func IndexClusterBlockByReferenceHeight(lctx lockctx.Proof, w storage.Writer, refHeight uint64, clusterBlockID flow.Identifier) error {
+	// Why is this lock necessary?
+	// A single reference height can correspond to multiple cluster blocks. While we are finalizing blocks,
+	// we may also be concurrently extending cluster blocks. This leads to simultaneous updates and reads
+	// on keys sharing the same prefix. To prevent race conditions during these concurrent reads and writes,
+	// synchronization is required when accessing these keys.
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+	return UpsertByKey(w, MakePrefix(codeRefHeightToClusterBlock, refHeight, clusterBlockID), nil)
+}
+
+// LookupClusterBlocksByReferenceHeightRange traverses the ref_height->cluster_block
+// index and returns any finalized cluster blocks which have a reference block with
+// height in the given range. This is used to avoid including duplicate transactions
+// when building or validating a new collection.
+func LookupClusterBlocksByReferenceHeightRange(lctx lockctx.Proof, r storage.Reader, start, end uint64, clusterBlockIDs *[]flow.Identifier) error {
+	// Why is this lock necessary?
+	// A single reference height can correspond to multiple cluster blocks. While we are finalizing blocks,
+	// we may also be concurrently extending cluster blocks. This leads to simultaneous updates and reads
+	// on keys sharing the same prefix. To prevent race conditions during these concurrent reads and writes,
+	// synchronization is required when accessing these keys.
+ if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + startPrefix := MakePrefix(codeRefHeightToClusterBlock, start) + endPrefix := MakePrefix(codeRefHeightToClusterBlock, end) + prefixLen := len(startPrefix) + checkFunc := func(key []byte) error { + clusterBlockIDBytes := key[prefixLen:] + var clusterBlockID flow.Identifier + copy(clusterBlockID[:], clusterBlockIDBytes) + *clusterBlockIDs = append(*clusterBlockIDs, clusterBlockID) + + // the info we need is stored in the key, never process the value + return nil + } + + return IterateKeysByPrefixRange(r, startPrefix, endPrefix, checkFunc) +} diff --git a/storage/operation/cluster_test.go b/storage/operation/cluster_test.go new file mode 100644 index 00000000000..de200efc02b --- /dev/null +++ b/storage/operation/cluster_test.go @@ -0,0 +1,368 @@ +package operation_test + +import ( + "math/rand" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestClusterHeights(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + var ( + clusterID flow.ChainID = "cluster" + height uint64 = 42 + expected = unittest.IdentifierFixture() + err error + ) + + t.Run("retrieve non-existent", func(t *testing.T) { + var actual flow.Identifier + err = operation.LookupClusterBlockHeight(db.Reader(), clusterID, height, &actual) + t.Log(err) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + + t.Run("insert/retrieve", func(t *testing.T) { + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), clusterID, height, expected) + }) + lctx.Release() + assert.NoError(t, err) + + var actual flow.Identifier + err = operation.LookupClusterBlockHeight(db.Reader(), clusterID, height, &actual) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("multiple chain IDs", func(t *testing.T) { + // use different cluster ID but same block height + // - we first index *all* three blocks from different clusters for the same height + // - then we retrieve *all* three block IDs in a second step + // First writing all three is important to detect bugs, where the logic ignores the cluster ID + // and only memorizes the latest block stored for a given height (irrespective of cluster ID). 
+ clusterBlockIDs := unittest.IdentifierListFixture(3) + clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + var actual flow.Identifier + for i := 0; i < len(clusterBlockIDs); i++ { + err = operation.LookupClusterBlockHeight(db.Reader(), clusterIDs[i], height, &actual) + assert.ErrorIs(t, err, storage.ErrNotFound) + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), clusterIDs[i], height, clusterBlockIDs[i]) + }) + lctx.Release() // Release lock immediately after operation + assert.NoError(t, err) + } + for i := 0; i < len(clusterBlockIDs); i++ { + err = operation.LookupClusterBlockHeight(db.Reader(), clusterIDs[i], height, &actual) + assert.NoError(t, err) + assert.Equal(t, clusterBlockIDs[i], actual) + } + }) + }) +} + +// Test_RetrieveClusterFinalizedHeight verifies proper retrieval of the latest finalized cluster block height. +func Test_RetrieveClusterFinalizedHeight(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + var ( + clusterID flow.ChainID = "cluster" + expected uint64 = 42 + err error + ) + + t.Run("retrieve non-existant", func(t *testing.T) { + var actual uint64 + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &actual) + t.Log(err) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + + t.Run("insert/retrieve", func(t *testing.T) { + + unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), clusterID, 21) + }) + }) + + unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), clusterID, expected) + }) + }) + + var actual uint64 + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &actual) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("multiple chain IDs", func(t *testing.T) { + // persist latest finalized cluster block height for three different collector clusters + // - we first index *all* three latest finalized block heights from different clusters + // - then we retrieve all three latest finalized block heights in a second step + // First writing all three is important to detect bugs, where the logic ignores the cluster ID + // and only memorizes the last value stored (irrespective of cluster ID). 
+ clusterFinalizedHeights := []uint64{117, 11, 791} + clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + var actual uint64 + for i := 0; i < len(clusterFinalizedHeights); i++ { + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterIDs[i], &actual) + assert.ErrorIs(t, err, storage.ErrNotFound) + + unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), clusterIDs[i], clusterFinalizedHeights[i]) + }) + }) + } + for i := 0; i < len(clusterFinalizedHeights); i++ { + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterIDs[i], &actual) + assert.NoError(t, err) + assert.Equal(t, clusterFinalizedHeights[i], actual) + } + }) + }) +} + +func TestClusterBlockByReferenceHeight(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + t.Run("should be able to index cluster block by reference height", func(t *testing.T) { + id := unittest.IdentifierFixture() + height := rand.Uint64() + unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), height, id) + }) + }) + + var retrieved []flow.Identifier + unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), height, height, &retrieved) + }) + require.Len(t, retrieved, 1) + assert.Equal(t, id, retrieved[0]) + }) + }) + + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + t.Run("should be able to index multiple cluster blocks at same reference height", func(t *testing.T) { + ids := unittest.IdentifierListFixture(10) + height := rand.Uint64() + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + defer lctx.Release() + for _, id := range ids { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), height, id) + }) + assert.NoError(t, err) + } + + var retrieved []flow.Identifier + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), height, height, &retrieved) + assert.NoError(t, err) + assert.Len(t, retrieved, len(ids)) + assert.ElementsMatch(t, ids, retrieved) + }) + }) + + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + t.Run("should be able to lookup cluster blocks across height range", func(t *testing.T) { + ids := unittest.IdentifierListFixture(100) + nextHeight := rand.Uint64() + // keep track of height range + minHeight, maxHeight := nextHeight, nextHeight + // keep track of which ids are indexed at each nextHeight + lookup := make(map[uint64][]flow.Identifier) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + defer lctx.Release() + + for i := 0; i < len(ids); i++ { + // randomly adjust the nextHeight, increasing on average + r := rand.Intn(100) + if r < 20 { + nextHeight -= 1 // 20% probability + } else if r < 40 { + // 20% probability: nextHeight 
stays the same + } else if r < 80 { + nextHeight += 1 // 40% probability + } else { + nextHeight += 2 // 20% probability + } + + lookup[nextHeight] = append(lookup[nextHeight], ids[i]) + if nextHeight < minHeight { + minHeight = nextHeight + } + if nextHeight > maxHeight { + maxHeight = nextHeight + } + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), nextHeight, ids[i]) + }) + assert.NoError(t, err) + } + + // determine which ids we expect to be retrieved for a given height range + idsInHeightRange := func(min, max uint64) []flow.Identifier { + var idsForHeight []flow.Identifier + for height, id := range lookup { + if min <= height && height <= max { + idsForHeight = append(idsForHeight, id...) + } + } + return idsForHeight + } + + // Test cases are described as follows: + // {---} represents the queried height range + // [---] represents the indexed height range + // [{ means the left endpoint of both ranges are the same + // {-[ means the left endpoint of the queried range is strictly less than the indexed range + t.Run("{-}--[-]", func(t *testing.T) { + var retrieved []flow.Identifier + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), minHeight-100, minHeight-1, &retrieved) + assert.NoError(t, err) + assert.Len(t, retrieved, 0) + }) + t.Run("{-[--}-]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight - 100 + max := minHeight + (maxHeight-minHeight)/2 + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("{[--}--]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + max := minHeight + (maxHeight-minHeight)/2 + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[-{--}-]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + 1 + max := maxHeight - 1 + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[{----}]", func(t *testing.T) { + var retrieved []flow.Identifier + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), minHeight, maxHeight, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(minHeight, maxHeight) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[--{--}]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + (maxHeight-minHeight)/2 + max := maxHeight + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + 
assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[-{--]-}", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + (maxHeight-minHeight)/2 + max := maxHeight + 100 + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + assert.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[-]--{-}", func(t *testing.T) { + var retrieved []flow.Identifier + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), maxHeight+1, maxHeight+100, &retrieved) + assert.NoError(t, err) + assert.Len(t, retrieved, 0) + }) + }) + }) +} + +// expected average case # of blocks to lookup on Mainnet +func BenchmarkLookupClusterBlocksByReferenceHeightRange_1200(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 1200) +} + +// 5x average case on Mainnet +func BenchmarkLookupClusterBlocksByReferenceHeightRange_6_000(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 6_000) +} + +func BenchmarkLookupClusterBlocksByReferenceHeightRange_100_000(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 100_000) +} + +func benchmarkLookupClusterBlocksByReferenceHeightRange(b *testing.B, n int) { + lockManager := storage.NewTestingLockManager() + dbtest.BenchWithStorages(b, func(b *testing.B, r storage.Reader, wr dbtest.WithWriter) { + for i := 0; i < n; i++ { + lctx := lockManager.NewContext() + require.NoError(b, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := wr(func(w storage.Writer) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, w, rand.Uint64()%1000, unittest.IdentifierFixture()) + }) + require.NoError(b, err) + lctx.Release() + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var blockIDs []flow.Identifier + lctx := lockManager.NewContext() + require.NoError(b, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, r, 0, 1000, &blockIDs) + require.NoError(b, err) + lctx.Release() + } + }) +} diff --git a/storage/operation/collections.go b/storage/operation/collections.go index 76b6b6461f6..818f6680f0e 100644 --- a/storage/operation/collections.go +++ b/storage/operation/collections.go @@ -1,6 +1,10 @@ package operation import ( + "fmt" + + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -9,59 +13,117 @@ import ( // to the constituent transactions. They do not modify transactions contained // by the collections. -// UpsertCollection inserts a light collection into the storage. -// If the collection already exists, it will be overwritten. +// UpsertCollection inserts a [flow.LightCollection] into the storage, keyed by its ID. +// +// If the collection already exists, it will be overwritten. Note that here, the key (collection ID) is derived +// from the value (collection) via a collision-resistant hash function. Hence, unchecked overwrites pose no risk +// of data corruption, because for the same key, we expect the same value. +// +// No errors are expected during normal operation. 
func UpsertCollection(w storage.Writer, collection *flow.LightCollection) error { return UpsertByKey(w, MakePrefix(codeCollection, collection.ID()), collection) } +// RetrieveCollection retrieves a [flow.LightCollection] by its ID. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no collection with the specified ID is known. func RetrieveCollection(r storage.Reader, collID flow.Identifier, collection *flow.LightCollection) error { return RetrieveByKey(r, MakePrefix(codeCollection, collID), collection) } // RemoveCollection removes a collection from the storage. +// CAUTION: this is for recovery purposes only, and should not be used during normal operations! // It returns nil if the collection does not exist. // No errors are expected during normal operation. func RemoveCollection(w storage.Writer, collID flow.Identifier) error { return RemoveByKey(w, MakePrefix(codeCollection, collID)) } -// IndexCollectionPayload will overwrite any existing index, which is acceptable -// because the blockID is derived from txIDs within the payload, ensuring its uniqueness. -func IndexCollectionPayload(w storage.Writer, blockID flow.Identifier, txIDs []flow.Identifier) error { - return UpsertByKey(w, MakePrefix(codeIndexCollection, blockID), txIDs) +// IndexCollectionPayload populates the map from a cluster block ID to the batch of transactions it contains. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertOrFinalizeClusterBlock] and hold it until the database write has been +// committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexCollectionPayload(lctx lockctx.Proof, w storage.Writer, clusterBlockID flow.Identifier, txIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + return UpsertByKey(w, MakePrefix(codeIndexCollection, clusterBlockID), txIDs) } -// LookupCollection looks up the collection for a given cluster payload. -func LookupCollectionPayload(r storage.Reader, blockID flow.Identifier, txIDs *[]flow.Identifier) error { - return RetrieveByKey(r, MakePrefix(codeIndexCollection, blockID), txIDs) +// LookupCollectionPayload retrieves the list of transaction IDs that constitute the payload of the specified cluster block. +// For every known cluster block, this index should be populated. 
+// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `clusterBlockID` does not refer to a known cluster block +func LookupCollectionPayload(r storage.Reader, clusterBlockID flow.Identifier, txIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeIndexCollection, clusterBlockID), txIDs) } -// RemoveCollectionPayloadIndices removes a collection id indexed by a block id +// RemoveCollectionPayloadIndices removes a collection id indexed by a block id. +// CAUTION: this is for recovery purposes only, and should not be used during normal operations! +// It returns nil if the collection does not exist. // No errors are expected during normal operation. func RemoveCollectionPayloadIndices(w storage.Writer, blockID flow.Identifier) error { return RemoveByKey(w, MakePrefix(codeIndexCollection, blockID)) } -// UnsafeIndexCollectionByTransaction inserts a collection id keyed by a transaction id -// Unsafe because a transaction can belong to multiple collections, indexing collection by a transaction -// will overwrite the previous collection id that was indexed by the same transaction id -// To prevent overwritting, the caller must check if the transaction is already indexed, and make sure there -// is no dirty read before the writing by using locks. -func UnsafeIndexCollectionByTransaction(w storage.Writer, txID flow.Identifier, collectionID flow.Identifier) error { +// IndexCollectionByTransaction indexes the given collection ID, keyed by the transaction ID. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertCollection] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// WARNING, this index is NOT BFT in its current form: +// Honest clusters ensure a transaction can only belong to one collection. However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// TODO: eventually we need to handle Byzantine clusters +// +// No errors are expected during normal operation. +func IndexCollectionByTransaction(lctx lockctx.Proof, w storage.Writer, txID flow.Identifier, collectionID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertCollection) { + return fmt.Errorf("missing lock: %v", storage.LockInsertCollection) + } + return UpsertByKey(w, MakePrefix(codeIndexCollectionByTransaction, txID), collectionID) } -// LookupCollectionByTransaction looks up the collection indexed by the given transaction ID, -// which is the collection in which the given transaction was included. -// It returns storage.ErrNotFound if the collection is not found. -// No errors are expected during normal operaion. 
+// LookupCollectionByTransaction retrieves the collection ID for the collection that contains the specified transaction. +// For every known transaction, this index should be populated. +// +// WARNING, this index is NOT BFT in its current form: +// Honest clusters ensure a transaction can only belong to one collection. However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `txID` does not refer to a known transaction func LookupCollectionByTransaction(r storage.Reader, txID flow.Identifier, collectionID *flow.Identifier) error { return RetrieveByKey(r, MakePrefix(codeIndexCollectionByTransaction, txID), collectionID) } -// RemoveCollectionByTransactionIndex removes a collection id indexed by a transaction id, -// created by [UnsafeIndexCollectionByTransaction]. +// RemoveCollectionByTransactionIndex removes an entry in the index from transaction ID to collection containing the transaction. +// CAUTION: this is for recovery purposes only, and should not be used during normal operations! +// It returns nil if the collection does not exist. // No errors are expected during normal operation. func RemoveCollectionTransactionIndices(w storage.Writer, txID flow.Identifier) error { return RemoveByKey(w, MakePrefix(codeIndexCollectionByTransaction, txID)) diff --git a/storage/operation/collections_test.go b/storage/operation/collections_test.go index fc09e07cde0..401d173adcd 100644 --- a/storage/operation/collections_test.go +++ b/storage/operation/collections_test.go @@ -16,6 +16,7 @@ import ( func TestCollections(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { expected := unittest.CollectionFixture(2).Light() + lockManager := storage.NewTestingLockManager() t.Run("Retrieve nonexistant", func(t *testing.T) { var actual flow.LightCollection @@ -59,10 +60,13 @@ func TestCollections(t *testing.T) { expected := unittest.CollectionFixture(1).Light() blockID := unittest.IdentifierFixture() + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + defer lctx.Release() _ = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := operation.UpsertCollection(rw.Writer(), &expected) assert.NoError(t, err) - err = operation.IndexCollectionPayload(rw.Writer(), blockID, expected.Transactions) + err = operation.IndexCollectionPayload(lctx, rw.Writer(), blockID, expected.Transactions) assert.NoError(t, err) return nil }) @@ -78,8 +82,11 @@ func TestCollections(t *testing.T) { transactionID := unittest.IdentifierFixture() actual := flow.Identifier{} + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertCollection)) + defer lctx.Release() _ = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := operation.UnsafeIndexCollectionByTransaction(rw.Writer(), transactionID, expected) + err := operation.IndexCollectionByTransaction(lctx, rw.Writer(), transactionID, expected) assert.NoError(t, err) return nil }) diff --git a/storage/operation/commits.go b/storage/operation/commits.go index 1f80af9a768..b964a5c19d8 100644 --- a/storage/operation/commits.go +++ b/storage/operation/commits.go @@ -1,6 +1,11 @@ package operation import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + 
"github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -8,8 +13,26 @@ import ( // IndexStateCommitment indexes a state commitment. // // State commitments are keyed by the block whose execution results in the state with the given commit. -func IndexStateCommitment(w storage.Writer, blockID flow.Identifier, commit flow.StateCommitment) error { - return UpsertByKey(w, MakePrefix(codeCommit, blockID), commit) +// It returns [storage.ErrDataMismatch] if the commit already exists with a different value. +func IndexStateCommitment(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertOwnReceipt) + } + + var existingCommit flow.StateCommitment + err := LookupStateCommitment(rw.GlobalReader(), blockID, &existingCommit) + if err == nil { + if existingCommit == commit { + // The commit already exists, no need to index again + return nil + } + return fmt.Errorf("commit for block %v already exists with different value, (existing: %v, new: %v), %w", blockID, + existingCommit, commit, storage.ErrDataMismatch) + } else if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("could not check existing state commitment: %w", err) + } + + return UpsertByKey(rw.Writer(), MakePrefix(codeCommit, blockID), commit) } // LookupStateCommitment gets a state commitment keyed by block ID diff --git a/storage/operation/commits_test.go b/storage/operation/commits_test.go index bee8dd2b21e..c1fa6197a3c 100644 --- a/storage/operation/commits_test.go +++ b/storage/operation/commits_test.go @@ -14,10 +14,14 @@ import ( func TestStateCommitments(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() expected := unittest.StateCommitmentFixture() id := unittest.IdentifierFixture() + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return operation.IndexStateCommitment(rw.Writer(), id, expected) + return operation.IndexStateCommitment(lctx, rw, id, expected) })) var actual flow.StateCommitment diff --git a/storage/operation/computation_result.go b/storage/operation/computation_result.go index 62dcc19b651..fae8a241fcb 100644 --- a/storage/operation/computation_result.go +++ b/storage/operation/computation_result.go @@ -27,24 +27,18 @@ func GetComputationResultUploadStatus(r storage.Reader, blockID flow.Identifier, // GetBlockIDsByStatus returns all IDs of stored ComputationResult instances. 
func GetBlockIDsByStatus(r storage.Reader, blockIDs *[]flow.Identifier, targetUploadStatus bool) error { - return TraverseByPrefix(r, MakePrefix(codeComputationResults), func() (CheckFunc, CreateFunc, HandleFunc) { - var currKey flow.Identifier - check := func(key []byte) (bool, error) { - currKey = flow.HashToID(key[1:]) - return true, nil - } - + iterationFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { var wasUploadCompleted bool - create := func() interface{} { - return &wasUploadCompleted + err = getValue(&wasUploadCompleted) + if err != nil { + return true, err } - handle := func() error { - if blockIDs != nil && wasUploadCompleted == targetUploadStatus { - *blockIDs = append(*blockIDs, currKey) - } - return nil + if wasUploadCompleted == targetUploadStatus { + *blockIDs = append(*blockIDs, flow.HashToID(keyCopy[1:])) } - return check, create, handle - }, storage.DefaultIteratorOptions()) + return false, nil + } + + return TraverseByPrefix(r, MakePrefix(codeComputationResults), iterationFunc, storage.DefaultIteratorOptions()) } diff --git a/storage/operation/dbtest/helper.go b/storage/operation/dbtest/helper.go index cb6c7d20a18..81992a5af10 100644 --- a/storage/operation/dbtest/helper.go +++ b/storage/operation/dbtest/helper.go @@ -3,7 +3,7 @@ package dbtest import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -99,17 +99,9 @@ func BenchWithStorages(t *testing.B, fn func(*testing.B, storage.Reader, WithWri func runWithBadger(fn func(storage.Reader, WithWriter)) func(*badger.DB) { return func(db *badger.DB) { withWriter := func(writing func(storage.Writer) error) error { - writer := badgerimpl.NewReaderBatchWriter(db) - err := writing(writer) - if err != nil { - return err - } - - err = writer.Commit() - if err != nil { - return err - } - return nil + return badgerimpl.ToDB(db).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return writing(rw.Writer()) + }) } reader := badgerimpl.ToReader(db) @@ -120,17 +112,9 @@ func runWithBadger(fn func(storage.Reader, WithWriter)) func(*badger.DB) { func runWithPebble(fn func(storage.Reader, WithWriter)) func(*pebble.DB) { return func(db *pebble.DB) { withWriter := func(writing func(storage.Writer) error) error { - writer := pebbleimpl.NewReaderBatchWriter(db) - err := writing(writer) - if err != nil { - return err - } - - err = writer.Commit() - if err != nil { - return err - } - return nil + return pebbleimpl.ToDB(db).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return writing(rw.Writer()) + }) } reader := pebbleimpl.ToReader(db) diff --git a/storage/operation/epoch.go b/storage/operation/epoch.go new file mode 100644 index 00000000000..1735fe2afce --- /dev/null +++ b/storage/operation/epoch.go @@ -0,0 +1,22 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func InsertEpochSetup(w storage.Writer, eventID flow.Identifier, event *flow.EpochSetup) error { + return UpsertByKey(w, MakePrefix(codeEpochSetup, eventID), event) +} + +func RetrieveEpochSetup(r storage.Reader, eventID flow.Identifier, event *flow.EpochSetup) error { + return RetrieveByKey(r, MakePrefix(codeEpochSetup, eventID), event) +} + +func InsertEpochCommit(w storage.Writer, eventID flow.Identifier, event *flow.EpochCommit) error { + return UpsertByKey(w, MakePrefix(codeEpochCommit, eventID), 
event) +} + +func RetrieveEpochCommit(r storage.Reader, eventID flow.Identifier, event *flow.EpochCommit) error { + return RetrieveByKey(r, MakePrefix(codeEpochCommit, eventID), event) +} diff --git a/storage/operation/epoch_protocol_state.go b/storage/operation/epoch_protocol_state.go new file mode 100644 index 00000000000..f493b836cab --- /dev/null +++ b/storage/operation/epoch_protocol_state.go @@ -0,0 +1,36 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertEpochProtocolState inserts an epoch protocol state entry by ID. +// Error returns: +// - generic error in case of unexpected failure from the database layer or encoding failure. +func InsertEpochProtocolState(w storage.Writer, entryID flow.Identifier, entry *flow.MinEpochStateEntry) error { + return UpsertByKey(w, MakePrefix(codeEpochProtocolState, entryID), entry) +} + +// RetrieveEpochProtocolState retrieves an epoch protocol state entry by ID. +// Error returns: +// - storage.ErrNotFound if the key does not exist in the database +// - generic error in case of unexpected failure from the database layer +func RetrieveEpochProtocolState(r storage.Reader, entryID flow.Identifier, entry *flow.MinEpochStateEntry) error { + return RetrieveByKey(r, MakePrefix(codeEpochProtocolState, entryID), entry) +} + +// IndexEpochProtocolState indexes an epoch protocol state entry by block ID. +// Error returns: +// - generic error in case of unexpected failure from the database layer or encoding failure. +func IndexEpochProtocolState(w storage.Writer, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) +} + +// LookupEpochProtocolState finds an epoch protocol state entry ID by block ID. +// Error returns: +// - storage.ErrNotFound if the key does not exist in the database +// - generic error in case of unexpected failure from the database layer +func LookupEpochProtocolState(r storage.Reader, blockID flow.Identifier, epochProtocolStateEntryID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) +} diff --git a/storage/badger/operation/epoch_protocol_state_test.go b/storage/operation/epoch_protocol_state_test.go similarity index 51% rename from storage/badger/operation/epoch_protocol_state_test.go rename to storage/operation/epoch_protocol_state_test.go index 11aa330c1e2..52ce058d8bc 100644 --- a/storage/badger/operation/epoch_protocol_state_test.go +++ b/storage/operation/epoch_protocol_state_test.go @@ -1,37 +1,43 @@ -package operation +package operation_test import ( "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/utils/unittest" ) // TestInsertProtocolState tests if basic badger operations on EpochProtocolState work as expected. 
func TestInsertEpochProtocolState(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { expected := unittest.EpochStateFixture().MinEpochStateEntry epochProtocolStateEntryID := expected.ID() - err := db.Update(InsertEpochProtocolState(epochProtocolStateEntryID, expected)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochProtocolState(rw.Writer(), epochProtocolStateEntryID, expected) + }) require.NoError(t, err) var actual flow.MinEpochStateEntry - err = db.View(RetrieveEpochProtocolState(epochProtocolStateEntryID, &actual)) + err = operation.RetrieveEpochProtocolState(db.Reader(), epochProtocolStateEntryID, &actual) require.NoError(t, err) assert.Equal(t, expected, &actual) blockID := unittest.IdentifierFixture() - err = db.Update(IndexEpochProtocolState(blockID, epochProtocolStateEntryID)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexEpochProtocolState(rw.Writer(), blockID, epochProtocolStateEntryID) + }) require.NoError(t, err) var actualProtocolStateID flow.Identifier - err = db.View(LookupEpochProtocolState(blockID, &actualProtocolStateID)) + err = operation.LookupEpochProtocolState(db.Reader(), blockID, &actualProtocolStateID) require.NoError(t, err) assert.Equal(t, epochProtocolStateEntryID, actualProtocolStateID) diff --git a/storage/operation/events.go b/storage/operation/events.go index 0a444408402..f07467fe6db 100644 --- a/storage/operation/events.go +++ b/storage/operation/events.go @@ -46,40 +46,30 @@ func RemoveEventsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Iden } // eventIterationFunc returns an in iteration function which returns all events found during traversal or iteration -func eventIterationFunc(events *[]flow.Event) func() (CheckFunc, CreateFunc, HandleFunc) { - return func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(key []byte) (bool, error) { - return true, nil +func eventIterationFunc(events *[]flow.Event) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var event flow.Event + err = getValue(&event) + if err != nil { + return true, err } - var val flow.Event - create := func() interface{} { - return &val - } - handle := func() error { - *events = append(*events, val) - return nil - } - return check, create, handle + *events = append(*events, event) + return false, nil } } // eventFilterIterationFunc returns an iteration function which filters the result by the given event type in the handleFunc -func eventFilterIterationFunc(events *[]flow.Event, eventType flow.EventType) func() (CheckFunc, CreateFunc, HandleFunc) { - return func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(key []byte) (bool, error) { - return true, nil - } - var val flow.Event - create := func() interface{} { - return &val +func eventFilterIterationFunc(events *[]flow.Event, eventType flow.EventType) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var event flow.Event + err = getValue(&event) + if err != nil { + return true, err } - handle := func() error { - // filter out all events not of type eventType - if val.Type == eventType { - *events = append(*events, val) - } - return nil + // filter out all events not of type eventType + if event.Type == eventType { + *events = append(*events, event) } - return check, create, handle + return false, nil } } diff --git 
a/storage/operation/executed.go b/storage/operation/executed.go
index 3c3e126586b..f8f2c1d5d17 100644
--- a/storage/operation/executed.go
+++ b/storage/operation/executed.go
@@ -5,6 +5,36 @@ import (
 	"github.com/onflow/flow-go/storage"
 )
 
+// UpdateExecutedBlock updates the pointer to the Execution Node's OWN highest executed block. We
+// overwrite the block ID of the most recently executed block, regardless of whether this block may
+// later be orphaned or is already orphaned.
+//
+// ## Usage Context
+// - The stored "last executed block" may reference a block on a fork that is later orphaned.
+// - This is acceptable and expected: the index is intended for reporting execution metrics and
+//   for optimizing the loading of unexecuted blocks on node startup.
+// - On startup, the Execution Node may use the latest executed block as a hint on where to
+//   restart the execution. It MUST traverse from the last executed block in the direction of decreasing
+//   height. It will eventually reach a block with a finalized seal. From this block, the Execution
+//   Node should restart its execution and cover _all_ descendants (that are not orphaned). Thereby,
+//   we guarantee that even if the stored block is on a fork, we eventually also cover all blocks
+//   that are finalized as well as the most recent, still unfinalized blocks.
+// - If the block referenced as "highest executed block" is not on the canonical chain, the Execution
+//   Node may (re-)execute some blocks unnecessarily, but this does not affect correctness.
+//
+// ## Limitations & Edge Cases
+// - The value is not guaranteed to be on the finalized chain.
+// - Forks of arbitrary length may occur; the stored block may be on any such fork.
+//
+// ## Correct Usage
+// - Use for metrics (e.g., reporting latest executed block height).
+// - Use for optimizing block execution on startup (as a performance hint).
+//
+// ## Incorrect Usage
+// - Do not use as a source of truth for canonical chain state.
+// - Do not disregard blocks with lower heights as not needing execution.
+//
+// See project documentation in `engine/execution/ingestion/loader/unexecuted_loader.go` for details on startup traversal logic.
 func UpdateExecutedBlock(w storage.Writer, blockID flow.Identifier) error {
 	return UpsertByKey(w, MakePrefix(codeExecutedBlock), blockID)
 }
diff --git a/storage/operation/execution_fork_evidence.go b/storage/operation/execution_fork_evidence.go
index 0552bfb7abd..9e786c23957 100644
--- a/storage/operation/execution_fork_evidence.go
+++ b/storage/operation/execution_fork_evidence.go
@@ -21,6 +21,12 @@ func RetrieveExecutionForkEvidence(r storage.Reader, conflictingSeals *[]*flow.I
 	return RetrieveByKey(r, MakePrefix(codeExecutionFork), conflictingSeals)
 }
 
+// RemoveExecutionForkEvidence deletes conflicting seals record from the database.
+// No errors are expected during normal operations.
+func RemoveExecutionForkEvidence(w storage.Writer) error {
+	return RemoveByKey(w, MakePrefix(codeExecutionFork))
+}
+
 // InsertExecutionForkEvidence upserts conflicting seals to the database.
 // If a record already exists, it is overwritten; otherwise a new record is created.
 // No errors are expected during normal operations.
diff --git a/storage/operation/guarantees.go b/storage/operation/guarantees.go new file mode 100644 index 00000000000..913d4c1b673 --- /dev/null +++ b/storage/operation/guarantees.go @@ -0,0 +1,73 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// UnsafeInsertGuarantee inserts a [flow.CollectionGuarantee] into the database, keyed by the collection ID. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No other errors are expected during normal operation. +func UnsafeInsertGuarantee(lctx lockctx.Proof, w storage.Writer, collID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot insert guarantee %s for collection %s without holding lock %s", + guarantee.ID(), collID, storage.LockInsertBlock) + } + + return UpsertByKey(w, MakePrefix(codeGuarantee, collID), guarantee) +} + +// RetrieveGuarantee retrieves a [flow.CollectionGuarantee] by the collection ID. +// For every collection that has been guaranteed, this data should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `collID` does not refer to a known guaranteed collection +func RetrieveGuarantee(r storage.Reader, collID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + return RetrieveByKey(r, MakePrefix(codeGuarantee, collID), guarantee) +} + +// IndexPayloadGuarantees indexes the list of collection guarantees that were included in the specified block, +// keyed by the block ID. It produces a mapping from block ID to the list of collection guarantees contained in +// the block's payload. The collection guarantees are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No other errors are expected during normal operation. 
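+//
+// Illustrative usage sketch, assuming the caller holds a lock context `lctx` with
+// [storage.LockInsertBlock], and that `db`, `blockID`, and `payload` are provided by the caller:
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		for _, guarantee := range payload.Guarantees {
+//			if err := UnsafeInsertGuarantee(lctx, rw.Writer(), guarantee.CollectionID, guarantee); err != nil {
+//				return err
+//			}
+//		}
+//		return IndexPayloadGuarantees(lctx, rw.Writer(), blockID, flow.GetIDs(payload.Guarantees))
+//	})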
+func IndexPayloadGuarantees(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, guarIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index guarantee for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadGuarantees, blockID), guarIDs) +} + +// LookupPayloadGuarantees retrieves the list of guarantee IDs that were included in the payload +// of the specified block. For every known block (at or above the root block height), this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadGuarantees(r storage.Reader, blockID flow.Identifier, guarIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadGuarantees, blockID), guarIDs) +} diff --git a/storage/operation/guarantees_test.go b/storage/operation/guarantees_test.go new file mode 100644 index 00000000000..09a406627c3 --- /dev/null +++ b/storage/operation/guarantees_test.go @@ -0,0 +1,143 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/onflow/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestGuaranteeInsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + g := unittest.CollectionGuaranteeFixture() + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UnsafeInsertGuarantee(lctx, rw.Writer(), g.CollectionID, g) + }) + require.NoError(t, err) + + var retrieved flow.CollectionGuarantee + err = operation.RetrieveGuarantee(db.Reader(), g.CollectionID, &retrieved) + require.NoError(t, err) + + assert.Equal(t, g, &retrieved) + }) +} + +func TestIndexGuaranteedCollectionByBlockHashInsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + blockID := flow.Identifier{0x10} + collID1 := flow.Identifier{0x01} + collID2 := flow.Identifier{0x02} + guarantees := []*flow.CollectionGuarantee{ + {CollectionID: collID1, Signature: crypto.Signature{0x10}}, + {CollectionID: collID2, Signature: crypto.Signature{0x20}}, + } + expected := flow.GetIDs(guarantees) + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, guarantee := range guarantees { + if err := operation.UnsafeInsertGuarantee(lctx, rw.Writer(), guarantee.ID(), guarantee); err != nil { + return err + } + } + if err := operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID, expected); err != nil { + return err + } + return nil + }) + require.NoError(t, err) + require.NoError(t, err) + + var actual []flow.Identifier + err = operation.LookupPayloadGuarantees(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, []flow.Identifier{collID1, collID2}, actual) + }) +} + +func 
TestIndexGuaranteedCollectionByBlockHashMultipleBlocks(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID1 := flow.Identifier{0x10} + blockID2 := flow.Identifier{0x20} + collID1 := flow.Identifier{0x01} + collID2 := flow.Identifier{0x02} + collID3 := flow.Identifier{0x03} + collID4 := flow.Identifier{0x04} + set1 := []*flow.CollectionGuarantee{ + {CollectionID: collID1, Signature: crypto.Signature{0x1}}, + } + set2 := []*flow.CollectionGuarantee{ + {CollectionID: collID2, Signature: crypto.Signature{0x2}}, + {CollectionID: collID3, Signature: crypto.Signature{0x3}}, + {CollectionID: collID4, Signature: crypto.Signature{0x1}}, + } + ids1 := flow.GetIDs(set1) + ids2 := flow.GetIDs(set2) + + // insert block 1 + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, guarantee := range set1 { + if err := operation.UnsafeInsertGuarantee(lctx, rw.Writer(), guarantee.CollectionID, guarantee); err != nil { + return err + } + } + if err := operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID1, ids1); err != nil { + return err + } + return nil + }) + }) + + // insert block 2 + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, guarantee := range set2 { + if err := operation.UnsafeInsertGuarantee(lctx, rw.Writer(), guarantee.CollectionID, guarantee); err != nil { + return err + } + } + if err := operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID2, ids2); err != nil { + return err + } + return nil + }) + }) + + t.Run("should retrieve collections for block", func(t *testing.T) { + var actual1 []flow.Identifier + err := operation.LookupPayloadGuarantees(db.Reader(), blockID1, &actual1) + assert.NoError(t, err) + assert.ElementsMatch(t, []flow.Identifier{collID1}, actual1) + + // get block 2 + var actual2 []flow.Identifier + err = operation.LookupPayloadGuarantees(db.Reader(), blockID2, &actual2) + assert.NoError(t, err) + assert.Equal(t, []flow.Identifier{collID2, collID3, collID4}, actual2) + }) + }) +} diff --git a/storage/operation/headers.go b/storage/operation/headers.go new file mode 100644 index 00000000000..60cdc23e0d8 --- /dev/null +++ b/storage/operation/headers.go @@ -0,0 +1,171 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertHeader inserts a block header into the database. +// +// CAUTION: +// - The caller must ensure that headerID is a collision-resistant hash of the provided header! +// Otherwise, data corruption may occur. +// - The caller must acquire one (but not both) of the following locks and hold it until the database +// write has been committed: either [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock]. +// +// It returns [storage.ErrAlreadyExists] if the header already exists, i.e. we only insert a new header once. +// This error allows the caller to detect duplicate inserts. If the header is stored along with other parts +// of the block in the same batch, similar duplication checks can be skipped for storing other parts of the block. +// No other errors are expected during normal operation. 
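+//
+// Illustrative usage sketch, assuming `lockManager`, `db`, and `header` are provided by the caller:
+//
+//	lctx := lockManager.NewContext()
+//	defer lctx.Release()
+//	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
+//		return err
+//	}
+//	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return InsertHeader(lctx, rw, header.ID(), header)
+//	})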
+func InsertHeader(lctx lockctx.Proof, rw storage.ReaderBatchWriter, headerID flow.Identifier, header *flow.Header) error {
+	held := lctx.HoldsLock(storage.LockInsertBlock) || lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock)
+	if !held {
+		return fmt.Errorf("missing required lock: %s or %s", storage.LockInsertBlock, storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	key := MakePrefix(codeHeader, headerID)
+	exist, err := KeyExists(rw.GlobalReader(), key)
+	if err != nil {
+		return err
+	}
+	if exist {
+		return fmt.Errorf("header already exists: %w", storage.ErrAlreadyExists)
+	}
+
+	return UpsertByKey(rw.Writer(), key, header)
+}
+
+// RetrieveHeader retrieves the header of the block with the specified ID.
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if no block with the specified `blockID` is known.
+func RetrieveHeader(r storage.Reader, blockID flow.Identifier, header *flow.Header) error {
+	return RetrieveByKey(r, MakePrefix(codeHeader, blockID), header)
+}
+
+// IndexFinalizedBlockByHeight indexes a block by its height. It must ONLY be called on FINALIZED BLOCKS.
+//
+// CAUTION: The caller must acquire the [storage.LockFinalizeBlock] and hold it until the database
+// write has been committed.
+//
+// This function guarantees that the index is only inserted once for each height. We return
+// [storage.ErrAlreadyExists] if an entry for the given height already exists in the database.
+// No other errors are expected during normal operation.
+func IndexFinalizedBlockByHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, height uint64, blockID flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockFinalizeBlock) {
+		return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock)
+	}
+
+	var existingID flow.Identifier
+	key := MakePrefix(codeHeightToBlock, height)
+	err := RetrieveByKey(rw.GlobalReader(), key, &existingID)
+	if err == nil {
+		return fmt.Errorf("block ID already exists for height %d with existing ID %v, cannot reindex with blockID %v: %w",
+			height, existingID, blockID, storage.ErrAlreadyExists)
+	}
+	if !errors.Is(err, storage.ErrNotFound) {
+		return fmt.Errorf("failed to check existing block ID for height %d: %w", height, err)
+	}
+
+	return UpsertByKey(rw.Writer(), key, blockID)
+}
+
+// IndexCertifiedBlockByView indexes a CERTIFIED block by its view. Only certified blocks
+// (i.e. blocks that have received a QC) may be indexed, because HotStuff guarantees that there is
+// at most one certified block per view. Note that this does not hold for uncertified proposals,
+// as a byzantine leader might produce multiple proposals for the same view.
+//
+// CAUTION: The caller must acquire the [storage.LockInsertBlock] and hold it until the database write
+// has been committed.
+//
+// Returns [storage.ErrAlreadyExists] if a block ID has already been indexed for this view.
+// No other errors are expected during normal operation.
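+//
+// Illustrative usage sketch, assuming the caller holds `lctx` with [storage.LockInsertBlock],
+// `header` belongs to a certified block, and `db` is the protocol database:
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return IndexCertifiedBlockByView(lctx, rw, header.View, header.ID())
+//	})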
+func IndexCertifiedBlockByView(lctx lockctx.Proof, rw storage.ReaderBatchWriter, view uint64, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + var existingID flow.Identifier + key := MakePrefix(codeCertifiedBlockByView, view) + err := RetrieveByKey(rw.GlobalReader(), key, &existingID) + if err == nil { + return fmt.Errorf("block ID already exists for view %d with existingID %v, cannot reindex with blockID %v: %w", + view, existingID, blockID, storage.ErrAlreadyExists) + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing block ID for view %d: %w", view, err) + } + + return UpsertByKey(rw.Writer(), key, blockID) +} + +// LookupBlockHeight retrieves finalized blocks by height. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no finalized block for the specified height is known. +func LookupBlockHeight(r storage.Reader, height uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeHeightToBlock, height), blockID) +} + +// LookupCertifiedBlockByView retrieves the certified block by view. (Certified blocks are blocks that have received QC.) +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no certified block for the specified view is known. +func LookupCertifiedBlockByView(r storage.Reader, view uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeCertifiedBlockByView, view), blockID) +} + +// BlockExists checks whether the block exists in the database. +// No errors are expected during normal operation. +func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) { + return KeyExists(r, MakePrefix(codeHeader, blockID)) +} + +// IndexCollectionBlock produces a mapping from collection ID to the block ID containing this collection. +// +// CAUTION: +// - The caller must acquire the lock ??? and hold it until the database write has been committed. +// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash) +// - A collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the ABSENCE of BYZANTINE collector CLUSTERS (which the mature protocol must tolerate). +// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. +// +// Expected errors during normal operations: +// TODO: return [storage.ErrAlreadyExists] or [storage.ErrDataMismatch] +func IndexCollectionBlock(w storage.Writer, collID flow.Identifier, blockID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeCollectionBlock, collID), blockID) +} + +// LookupBlockContainingCollection retrieves the block containing the collection with the given ID. +// +// CAUTION: A collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the ABSENCE of BYZANTINE collector CLUSTERS (which the mature protocol must tolerate). 
+// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block is known that contains the specified collection ID. +func LookupBlockContainingCollection(r storage.Reader, collID flow.Identifier, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeCollectionBlock, collID), blockID) +} + +// FindHeaders iterates through all headers, calling `filter` on each, and adding +// them to the `found` slice if `filter` returned true +func FindHeaders(r storage.Reader, filter func(header *flow.Header) bool, found *[]flow.Header) error { + return TraverseByPrefix(r, MakePrefix(codeHeader), func(key []byte, getValue func(destVal any) error) (bail bool, err error) { + var h flow.Header + err = getValue(&h) + if err != nil { + return true, err + } + if filter(&h) { + *found = append(*found, h) + } + return false, nil + }, storage.DefaultIteratorOptions()) +} diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go new file mode 100644 index 00000000000..78a29cf2a7b --- /dev/null +++ b/storage/operation/headers_test.go @@ -0,0 +1,92 @@ +package operation_test + +import ( + "testing" + "time" + + "github.com/onflow/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestHeaderInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := &flow.Header{ + View: 1337, + Timestamp: time.Now().UTC(), + ParentID: flow.Identifier{0x11}, + PayloadHash: flow.Identifier{0x22}, + ParentVoterIndices: []byte{0x44}, + ParentVoterSigData: []byte{0x88}, + ProposerID: flow.Identifier{0x33}, + ProposerSigData: crypto.Signature{0x77}, + } + blockID := expected.ID() + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, expected.ID(), expected) + }) + require.NoError(t, err) + + var actual flow.Header + err = operation.RetrieveHeader(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, *expected, actual) + }) +} + +func TestHeaderIDIndexByCollectionID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + headerID := unittest.IdentifierFixture() + collectionID := unittest.IdentifierFixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexCollectionBlock(rw.Writer(), collectionID, headerID) + }) + require.NoError(t, err) + + actualID := &flow.Identifier{} + err = operation.LookupBlockContainingCollection(db.Reader(), collectionID, actualID) + require.NoError(t, err) + assert.Equal(t, headerID, *actualID) + }) +} + +func TestBlockHeightIndexLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + height := uint64(1337) + expected := flow.Identifier{0x01, 0x02, 0x03} + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) 
+ require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, height, expected) + }) + require.NoError(t, err) + + var actual flow.Identifier + err = operation.LookupBlockHeight(db.Reader(), height, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + }) +} diff --git a/storage/operation/heights.go b/storage/operation/heights.go new file mode 100644 index 00000000000..32efe411ea7 --- /dev/null +++ b/storage/operation/heights.go @@ -0,0 +1,96 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/storage" +) + +func InsertRootHeight(w storage.Writer, height uint64) error { + return UpsertByKey(w, MakePrefix(codeFinalizedRootHeight), height) +} + +func RetrieveRootHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeFinalizedRootHeight), height) +} + +func InsertSealedRootHeight(w storage.Writer, height uint64) error { + return UpsertByKey(w, MakePrefix(codeSealedRootHeight), height) +} + +func RetrieveSealedRootHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeSealedRootHeight), height) +} + +// UpsertFinalizedHeight upserts the finalized height index, overwriting the current value. +// Updates to this index must strictly increase the finalized height. +// To enforce this, the caller must check the current finalized height while holding [storage.LockFinalizeBlock]. +func UpsertFinalizedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + return UpsertByKey(w, MakePrefix(codeFinalizedHeight), height) +} + +func RetrieveFinalizedHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeFinalizedHeight), height) +} + +// UpsertSealedHeight upserts the latest sealed height, OVERWRITING the current value. +// Updates to this index must strictly increase the sealed height. +// To enforce this, the caller must check the current sealed height while holding [storage.LockFinalizeBlock]. +func UpsertSealedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + return UpsertByKey(w, MakePrefix(codeSealedHeight), height) +} + +func RetrieveSealedHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeSealedHeight), height) +} + +// InsertEpochFirstHeight inserts the height of the first block in the given epoch. +// The first block of an epoch E is the finalized block with view >= E.FirstView. +// Although we don't store the final height of an epoch, it can be inferred from this index. +// The caller must hold [storage.LockFinalizeBlock]. This function enforces each index is written exactly once. +// Returns storage.ErrAlreadyExists if the height has already been indexed. 
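+//
+// Illustrative usage sketch, assuming the caller holds `lctx` with [storage.LockFinalizeBlock] and
+// has determined that `header` is the first finalized block of epoch `epoch`:
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return InsertEpochFirstHeight(lctx, rw, epoch, header.Height)
+//	})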
+func InsertEpochFirstHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, epoch, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + + var existingHeight uint64 + err := RetrieveEpochFirstHeight(rw.GlobalReader(), epoch, &existingHeight) + if err == nil { + return storage.ErrAlreadyExists + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing epoch first height: %w", err) + } + + return UpsertByKey(rw.Writer(), MakePrefix(codeEpochFirstHeight, epoch), height) +} + +// RetrieveEpochFirstHeight retrieves the height of the first block in the given epoch. +// This operation does not require any locks, because the first height of an epoch does not change once set. +// Returns [storage.ErrNotFound] if the first block of the epoch has not yet been finalized. +func RetrieveEpochFirstHeight(r storage.Reader, epoch uint64, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeEpochFirstHeight, epoch), height) +} + +// RetrieveEpochLastHeight retrieves the height of the last block in the given epoch. +// This operation does not require any locks, because the first height of an epoch does not change once set. +// It's a more readable, but equivalent query to RetrieveEpochFirstHeight when interested in the last height of an epoch. +// Returns [storage.ErrNotFound] if the first block of the epoch has not yet been finalized. +func RetrieveEpochLastHeight(r storage.Reader, epoch uint64, height *uint64) error { + var nextEpochFirstHeight uint64 + if err := RetrieveByKey(r, MakePrefix(codeEpochFirstHeight, epoch+1), &nextEpochFirstHeight); err != nil { + return err + } + *height = nextEpochFirstHeight - 1 + return nil +} diff --git a/storage/operation/heights_test.go b/storage/operation/heights_test.go new file mode 100644 index 00000000000..01d6d050c4f --- /dev/null +++ b/storage/operation/heights_test.go @@ -0,0 +1,117 @@ +package operation_test + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" +) + +func TestFinalizedInsertUpdateRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, err) + defer lctx.Release() + + height := uint64(1337) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), height) + }) + require.NoError(t, err) + + var retrieved uint64 + err = operation.RetrieveFinalizedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + + height = 9999 + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), height) + }) + require.NoError(t, err) + + err = operation.RetrieveFinalizedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + }) +} + +func TestSealedInsertUpdateRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, 
err) + defer lctx.Release() + height := uint64(1337) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertSealedHeight(lctx, rw.Writer(), height) + }) + require.NoError(t, err) + + var retrieved uint64 + err = operation.RetrieveSealedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + + height = 9999 + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertSealedHeight(lctx, rw.Writer(), height) + }) + require.NoError(t, err) + + err = operation.RetrieveSealedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + }) +} + +func TestEpochFirstBlockIndex_InsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + require.NoError(t, err) + defer lctx.Release() + height := rand.Uint64() + epoch := rand.Uint64() + + // retrieve when empty errors + var retrieved uint64 + err = operation.RetrieveEpochFirstHeight(db.Reader(), epoch, &retrieved) + require.ErrorIs(t, err, storage.ErrNotFound) + + // can insert + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochFirstHeight(lctx, rw, epoch, height) + }) + require.NoError(t, err) + + // can retrieve + err = operation.RetrieveEpochFirstHeight(db.Reader(), epoch, &retrieved) + require.NoError(t, err) + assert.Equal(t, retrieved, height) + + // retrieve non-existent key errors + err = operation.RetrieveEpochFirstHeight(db.Reader(), epoch+1, &retrieved) + require.ErrorIs(t, err, storage.ErrNotFound) + + // insert existent key errors + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochFirstHeight(lctx, rw, epoch, height) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) +} diff --git a/storage/operation/multi_dbstore.go b/storage/operation/multi_dbstore.go index 25e4ac1870b..1e161cdf4da 100644 --- a/storage/operation/multi_dbstore.go +++ b/storage/operation/multi_dbstore.go @@ -1,6 +1,10 @@ package operation -import "github.com/onflow/flow-go/storage" +import ( + "github.com/hashicorp/go-multierror" + + "github.com/onflow/flow-go/storage" +) type multiDBStore struct { rwStore storage.DB // primary read and write store @@ -32,3 +36,16 @@ func (b *multiDBStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) func (b *multiDBStore) NewBatch() storage.Batch { return b.rwStore.NewBatch() } + +func (b *multiDBStore) Close() error { + var result *multierror.Error + + if err := b.rwStore.Close(); err != nil { + result = multierror.Append(result, err) + } + if err := b.r.Close(); err != nil { + result = multierror.Append(result, err) + } + + return result.ErrorOrNil() +} diff --git a/storage/operation/multi_iterator_test.go b/storage/operation/multi_iterator_test.go index 9bdb3c4a171..db553541fc3 100644 --- a/storage/operation/multi_iterator_test.go +++ b/storage/operation/multi_iterator_test.go @@ -3,7 +3,7 @@ package operation_test import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" diff --git a/storage/operation/multi_reader_test.go b/storage/operation/multi_reader_test.go index ab247005028..dbbbabf26c5 100644 --- a/storage/operation/multi_reader_test.go +++ 
b/storage/operation/multi_reader_test.go @@ -3,7 +3,7 @@ package operation_test import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" diff --git a/storage/operation/multi_seeker_test.go b/storage/operation/multi_seeker_test.go index 78fba50c846..d37ccda5bcb 100644 --- a/storage/operation/multi_seeker_test.go +++ b/storage/operation/multi_seeker_test.go @@ -3,7 +3,7 @@ package operation_test import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" diff --git a/storage/operation/payload.go b/storage/operation/payload.go new file mode 100644 index 00000000000..e7f10bbdd7a --- /dev/null +++ b/storage/operation/payload.go @@ -0,0 +1,219 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertSeal inserts a [flow.Seal] into the database, keyed by its ID. +// +// CAUTION: The caller must ensure sealID is a collision-resistant hash of the provided seal! +// This method silently overrides existing data, which is safe only if for the same key, we +// always write the same value. +// +// No errors are expected during normal operation. +func InsertSeal(w storage.Writer, sealID flow.Identifier, seal *flow.Seal) error { + return UpsertByKey(w, MakePrefix(codeSeal, sealID), seal) +} + +// RetrieveSeal retrieves the [flow.Seal] by its ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no seal with the specified `sealID` is known. +func RetrieveSeal(r storage.Reader, sealID flow.Identifier, seal *flow.Seal) error { + return RetrieveByKey(r, MakePrefix(codeSeal, sealID), seal) +} + +// IndexPayloadSeals indexes the list of Seals that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of seals contained in the block's payload. +// The seals are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadSeals(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sealIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index seal for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadSeals, blockID), sealIDs) +} + +// LookupPayloadSeals retrieves the list of Seals that were included in the payload +// of the specified block. 
For every known block (at or above the root block height), +// this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadSeals(r storage.Reader, blockID flow.Identifier, sealIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadSeals, blockID), sealIDs) +} + +// IndexPayloadReceipts indexes the list of Execution Receipts that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of Receipts contained in the block's payload. +// Execution Receipts are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadReceipts(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, receiptIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index seal for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadReceipts, blockID), receiptIDs) +} + +// IndexPayloadResults indexes the list of Execution Results that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of Results contained in the block's payload. +// Execution Results are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. 
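+//
+// Illustrative usage sketch, assuming the caller holds `lctx` with [storage.LockInsertBlock] and has
+// extracted `receiptIDs` and `resultIDs` from the payload of block `blockID`:
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		if err := IndexPayloadReceipts(lctx, rw.Writer(), blockID, receiptIDs); err != nil {
+//			return err
+//		}
+//		return IndexPayloadResults(lctx, rw.Writer(), blockID, resultIDs)
+//	})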
+func IndexPayloadResults(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, resultIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index seal for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadResults, blockID), resultIDs) +} + +// IndexPayloadProtocolStateID indexes the given Protocol State ID by the block ID. +// The Protocol State ID represents the configuration, which the block proposes to become active *after* the +// block's certification. Every block states the ID of the Protocol State it proposes as part of the payload. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exits. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadProtocolStateID(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, stateID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index seal for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadProtocolStateID, blockID), stateID) +} + +// LookupPayloadProtocolStateID retrieves the Protocol State ID for the specified block. +// The Protocol State ID represents the configuration, which the block proposes to become active *after* +// the block's certification. For every known block (at or above the root block height), the protocol +// state at the end of the block should be specified in the payload, and hence be indexed. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadProtocolStateID(r storage.Reader, blockID flow.Identifier, stateID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadProtocolStateID, blockID), stateID) +} + +// LookupPayloadReceipts retrieves the list of Execution Receipts that were included in the payload of the +// specified block. For every known block (at or above the root block height), this index should be populated. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block. +func LookupPayloadReceipts(r storage.Reader, blockID flow.Identifier, receiptIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadReceipts, blockID), receiptIDs) +} + +// LookupPayloadResults retrieves the list of Execution Results that were included in the payload of the +// specified block. For every known block (at or above the root block height), this index should be populated. 
+// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadResults(r storage.Reader, blockID flow.Identifier, resultIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadResults, blockID), resultIDs) +} + +// IndexLatestSealAtBlock persists the highest seal that was included in the fork with head blockID. +// Frequently, this is the highest seal included in this block's payload. However, if there are no seals in +// this block, sealID should reference the highest seal in blockID's ancestors. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexLatestSealAtBlock(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sealID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codeBlockIDToLatestSealID, blockID), sealID) +} + +// LookupLatestSealAtBlock finds the highest seal that was included in the fork up to (and including) blockID. +// Frequently, this is the highest seal included in this block's payload. However, if there are no seals in +// this block, sealID should reference the highest seal in blockID's ancestors. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if the specified block is unknown +func LookupLatestSealAtBlock(r storage.Reader, blockID flow.Identifier, sealID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeBlockIDToLatestSealID, blockID), sealID) +} +
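The doc comments above explain that the latest-seal index either points to the highest seal in the block's own payload or falls back to the parent's latest seal, but no caller is shown. The sketch below is illustrative only and not part of this change set; the helper name `latestSealForNewBlock` and the assumption that the payload lists its seals in fork order (so the last one is the highest) are assumptions for this sketch.

```go
package operation_test

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation"
)

// latestSealForNewBlock is a hypothetical helper computing the sealID argument for
// IndexLatestSealAtBlock when extending the fork with a new block.
func latestSealForNewBlock(r storage.Reader, parentID flow.Identifier, payload *flow.Payload) (flow.Identifier, error) {
	// If the new block carries seals, use the last one in the payload.
	// Assumption for this sketch: the payload lists seals in fork order, so the last is the highest.
	if len(payload.Seals) > 0 {
		return payload.Seals[len(payload.Seals)-1].ID(), nil
	}
	// Otherwise, inherit the latest seal indexed for the parent block.
	var sealID flow.Identifier
	err := operation.LookupLatestSealAtBlock(r, parentID, &sealID)
	if err != nil {
		return flow.ZeroID, fmt.Errorf("could not look up latest seal of parent %v: %w", parentID, err)
	}
	return sealID, nil
}
```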
+// IndexFinalizedSealByBlockID indexes the _finalized_ seal by the sealed block ID. +// Example: A <- B <- C(SealA) +// when block C is finalized, we create the index `A.ID->SealA.ID` +// +// CAUTION: +// - The caller must acquire the [storage.LockFinalizeBlock] and hold it until the database write has been committed. +// TODO: add lock proof as input and check for holding the lock in the implementation +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexFinalizedSealByBlockID(w storage.Writer, sealedBlockID flow.Identifier, sealID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), sealID) +} + +// LookupBySealedBlockID returns the finalized seal for the specified FINALIZED block ID. +// In order for a block to have a seal in a finalized block, it must itself be finalized. Hence, +// this function only works for finalized blocks. However, note that there might be finalized blocks +// for which no seal exists (or the block containing the seal might not yet be finalized). +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no seal for the specified block is known. +func LookupBySealedBlockID(r storage.Reader, blockID flow.Identifier, sealID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeBlockIDToFinalizedSeal, blockID), sealID) +} diff --git a/storage/operation/payload_test.go b/storage/operation/payload_test.go new file mode 100644 index 00000000000..70d375f5989 --- /dev/null +++ b/storage/operation/payload_test.go @@ -0,0 +1,67 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSealInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := unittest.Seal.Fixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertSeal(rw.Writer(), expected.ID(), expected) + }) + require.NoError(t, err) + + var actual flow.Seal + err = operation.RetrieveSeal(db.Reader(), expected.ID(), &actual) + require.NoError(t, err) + + assert.Equal(t, expected, &actual) + }) +} + +func TestSealIndexAndLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + seal1 := unittest.Seal.Fixture() + seal2 := unittest.Seal.Fixture() + + seals := []*flow.Seal{seal1, seal2} + blockID := flow.MakeID([]byte{0x42}) + expected := []flow.Identifier(flow.GetIDs(seals)) + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, seal := range seals { + if err := operation.InsertSeal(rw.Writer(), seal.ID(), seal); err != nil { + return err + } + } + if err := operation.IndexPayloadSeals(lctx, rw.Writer(), blockID, expected); err != nil { + return err + } + return nil + }) + require.NoError(t, err) + + var actual []flow.Identifier + err = operation.LookupPayloadSeals(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + }) +} diff --git a/storage/operation/pebbleimpl/dbstore.go b/storage/operation/pebbleimpl/dbstore.go index c7362681f24..f9891d135f4 100644 --- a/storage/operation/pebbleimpl/dbstore.go +++ b/storage/operation/pebbleimpl/dbstore.go @@ -1,7 +1,7 @@ package pebbleimpl import ( - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/onflow/flow-go/storage" ) @@ -25,3 +25,8 @@ func (b *dbStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) error func (b *dbStore) NewBatch()
storage.Batch { return NewReaderBatchWriter(b.db) } + +// No errors are expected during normal operation. +func (b *dbStore) Close() error { + return b.db.Close() +} diff --git a/storage/operation/pebbleimpl/iterator.go b/storage/operation/pebbleimpl/iterator.go index 49792d160c3..269de850681 100644 --- a/storage/operation/pebbleimpl/iterator.go +++ b/storage/operation/pebbleimpl/iterator.go @@ -3,7 +3,7 @@ package pebbleimpl import ( "fmt" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/onflow/flow-go/storage" ) diff --git a/storage/operation/pebbleimpl/reader.go b/storage/operation/pebbleimpl/reader.go index 9dbb4d91d82..a1a2c122cf5 100644 --- a/storage/operation/pebbleimpl/reader.go +++ b/storage/operation/pebbleimpl/reader.go @@ -6,7 +6,7 @@ import ( "fmt" "io" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" diff --git a/storage/operation/pebbleimpl/seeker.go b/storage/operation/pebbleimpl/seeker.go index 60e09f6cabc..ba3f8ca2813 100644 --- a/storage/operation/pebbleimpl/seeker.go +++ b/storage/operation/pebbleimpl/seeker.go @@ -9,7 +9,7 @@ import ( "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/merr" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" ) type pebbleSeeker struct { diff --git a/storage/operation/pebbleimpl/writer.go b/storage/operation/pebbleimpl/writer.go index f037b6b763e..656154deeb4 100644 --- a/storage/operation/pebbleimpl/writer.go +++ b/storage/operation/pebbleimpl/writer.go @@ -3,9 +3,8 @@ package pebbleimpl import ( "bytes" "fmt" - "sync" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" @@ -22,9 +21,6 @@ type ReaderBatchWriter struct { // for executing callbacks after the batch has been flushed, such as updating caches callbacks *operation.Callbacks - - // for repreventing re-entrant deadlock - locks *operation.BatchLocks } var _ storage.ReaderBatchWriter = (*ReaderBatchWriter)(nil) @@ -50,14 +46,6 @@ func (b *ReaderBatchWriter) PebbleWriterBatch() *pebble.Batch { return b.batch } -// Lock tries to acquire the lock for the batch. -// if the lock is already acquired by this same batch from other pending db operations, -// then it will not be blocked and can continue updating the batch, which prevents a re-entrant deadlock. -// CAUTION: The caller must ensure that no other references exist for the input lock. -func (b *ReaderBatchWriter) Lock(lock *sync.Mutex) { - b.locks.Lock(lock, b.callbacks) -} - // AddCallback adds a callback to execute after the batch has been flush // regardless the batch update is succeeded or failed. // The error parameter is the error returned by the batch update. @@ -66,11 +54,10 @@ func (b *ReaderBatchWriter) AddCallback(callback func(error)) { } // Commit flushes the batch to the database. -// No errors are expected during normal operation. +// Commit may be called at most once per Batch. // ReaderBatchWriter can't be reused after Commit() is called. +// No errors are expected during normal operation. func (b *ReaderBatchWriter) Commit() error { - defer b.batch.Close() // Release batch resource - err := b.batch.Commit(pebble.Sync) b.callbacks.NotifyCallbacks(err) @@ -79,15 +66,16 @@ func (b *ReaderBatchWriter) Commit() error { } // Close releases memory of the batch and no error is returned. +// Close must be called exactly once per batch. 
// This can be called as a defer statement immediately after creating Batch // to reduce risk of unbounded memory consumption. +// No errors are expected during normal operation. func (b *ReaderBatchWriter) Close() error { // Pebble v2 docs for Batch.Close(): // // "Close closes the batch without committing it." - b.batch.Close() - return nil + return b.batch.Close() } func WithReaderBatchWriter(db *pebble.DB, fn func(storage.ReaderBatchWriter) error) error { @@ -113,7 +101,6 @@ func NewReaderBatchWriter(db *pebble.DB) *ReaderBatchWriter { globalReader: ToReader(db), batch: db.NewBatch(), callbacks: operation.NewCallbacks(), - locks: operation.NewBatchLocks(), } } diff --git a/storage/operation/prefix.go b/storage/operation/prefix.go index e2a1c9dc4f0..6bcabb8ee96 100644 --- a/storage/operation/prefix.go +++ b/storage/operation/prefix.go @@ -11,8 +11,8 @@ import ( const ( // codes for special database markers - // codeMax = 1 // deprecated - codeDBType = 2 // specifies a database type + _ = 1 // DEPRECATED: previously used for badger to denote the max length of the storage codes in units of bytes + _ = 2 // DEPRECATED: previously used to differentiate the protocol database from the secrets database; now the former is pebble and the latter is badger // codes for views with special meaning codeSafetyData = 10 // safety data for hotstuff state @@ -55,6 +55,7 @@ const ( codeBlockIDToQuorumCertificate = 45 // index of quorum certificates by block ID codeEpochProtocolStateByBlockID = 46 // index of epoch protocol state entry by block ID codeProtocolKVStoreByBlockID = 47 // index of protocol KV store entry by block ID + codeCertifiedBlockByView = 48 // index mapping view to ID of certified block (guaranteed by HotStuff that there is at most one per view) // codes for indexing multiple identifiers by identifier codeBlockChildren = 50 // index mapping block ID to children blocks @@ -72,9 +73,10 @@ const ( // codes related to protocol level information codeEpochSetup = 61 // EpochSetup service event, keyed by ID codeEpochCommit = 62 // EpochCommit service event, keyed by ID - codeBeaconPrivateKey = 63 // BeaconPrivateKey, keyed by epoch counter - codeDKGStarted = 64 // flag that the DKG for an epoch has been started - codeDKGEnded = 65 // flag that the DKG for an epoch has ended (stores end state) + _ = 63 // USED BY SECRETS DATABASE: BeaconPrivateKey, keyed by epoch counter + _ = 64 // DEPRECATED: flag that the DKG for an epoch has been started + _ = 65 // DEPRECATED: flag that the DKG for an epoch has ended (stores end state) + _ = 66 // USED BY SECRETS DATABASE: current state of Recoverable Random Beacon State Machine for given epoch codeVersionBeacon = 67 // flag for storing version beacons codeEpochProtocolState = 68 codeProtocolKVStore = 69 diff --git a/storage/badger/operation/protocol_kv_store.go b/storage/operation/protocol_kv_store.go similarity index 53% rename from storage/badger/operation/protocol_kv_store.go rename to storage/operation/protocol_kv_store.go index 243c8dc57c4..ad00bf4f80f 100644 --- a/storage/badger/operation/protocol_kv_store.go +++ b/storage/operation/protocol_kv_store.go @@ -1,39 +1,38 @@ package operation import ( - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) // InsertProtocolKVStore inserts a protocol KV store by ID. // Error returns: // - storage.ErrAlreadyExists if the key already exists in the database. 
// - generic error in case of unexpected failure from the database layer or encoding failure. -func InsertProtocolKVStore(protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) func(*badger.Txn) error { - return insert(makePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore) +func InsertProtocolKVStore(w storage.Writer, protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) error { + return UpsertByKey(w, MakePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore) } // RetrieveProtocolKVStore retrieves a protocol KV store by ID. // Error returns: // - storage.ErrNotFound if the key does not exist in the database // - generic error in case of unexpected failure from the database layer -func RetrieveProtocolKVStore(protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) func(*badger.Txn) error { - return retrieve(makePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore) +func RetrieveProtocolKVStore(r storage.Reader, protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) error { + return RetrieveByKey(r, MakePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore) } // IndexProtocolKVStore indexes a protocol KV store by block ID. // Error returns: // - storage.ErrAlreadyExists if the key already exists in the database. // - generic error in case of unexpected failure from the database layer -func IndexProtocolKVStore(blockID flow.Identifier, protocolKVStoreID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeProtocolKVStoreByBlockID, blockID), protocolKVStoreID) +func IndexProtocolKVStore(w storage.Writer, blockID flow.Identifier, protocolKVStoreID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeProtocolKVStoreByBlockID, blockID), protocolKVStoreID) } // LookupProtocolKVStore finds protocol KV store ID by block ID. // Error returns: // - storage.ErrNotFound if the key does not exist in the database // - generic error in case of unexpected failure from the database layer -func LookupProtocolKVStore(blockID flow.Identifier, protocolKVStoreID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeProtocolKVStoreByBlockID, blockID), protocolKVStoreID) +func LookupProtocolKVStore(r storage.Reader, blockID flow.Identifier, protocolKVStoreID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeProtocolKVStoreByBlockID, blockID), protocolKVStoreID) } diff --git a/storage/badger/operation/protocol_kv_store_test.go b/storage/operation/protocol_kv_store_test.go similarity index 53% rename from storage/badger/operation/protocol_kv_store_test.go rename to storage/operation/protocol_kv_store_test.go index 433e743567c..59292c897ee 100644 --- a/storage/badger/operation/protocol_kv_store_test.go +++ b/storage/operation/protocol_kv_store_test.go @@ -1,40 +1,46 @@ -package operation +package operation_test import ( "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/utils/unittest" ) // TestInsertProtocolKVStore tests if basic badger operations on ProtocolKVStore work as expected. 
func TestInsertProtocolKVStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { expected := &flow.PSKeyValueStoreData{ Version: 2, Data: unittest.RandomBytes(32), } kvStoreStateID := unittest.IdentifierFixture() - err := db.Update(InsertProtocolKVStore(kvStoreStateID, expected)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertProtocolKVStore(rw.Writer(), kvStoreStateID, expected) + }) require.NoError(t, err) var actual flow.PSKeyValueStoreData - err = db.View(RetrieveProtocolKVStore(kvStoreStateID, &actual)) + err = operation.RetrieveProtocolKVStore(db.Reader(), kvStoreStateID, &actual) require.NoError(t, err) assert.Equal(t, expected, &actual) blockID := unittest.IdentifierFixture() - err = db.Update(IndexProtocolKVStore(blockID, kvStoreStateID)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexProtocolKVStore(rw.Writer(), blockID, kvStoreStateID) + }) require.NoError(t, err) var actualProtocolKVStoreID flow.Identifier - err = db.View(LookupProtocolKVStore(blockID, &actualProtocolKVStoreID)) + err = operation.LookupProtocolKVStore(db.Reader(), blockID, &actualProtocolKVStoreID) require.NoError(t, err) assert.Equal(t, kvStoreStateID, actualProtocolKVStoreID) diff --git a/storage/operation/qcs.go b/storage/operation/qcs.go new file mode 100644 index 00000000000..00f1cabe7b8 --- /dev/null +++ b/storage/operation/qcs.go @@ -0,0 +1,51 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertQuorumCertificate atomically performs the following storage operations for the given QuorumCertificate [QC]: +// 1. Check if a QC certifying the same block is already stored. +// 2. Only if no QC exists for the block, append the storage operations for indexing the QC by the block ID it certifies. +// +// CAUTION: +// - For the same block, different QCs can easily be constructed by selecting different sub-sets +// of the received votes. In most cases, it is only important that a block has been certified, +// but it is irrelevant who specifically contributed to the QC. Therefore, we only store the first QC. +// - In order to make sure only one QC is stored per block, _all calls_ to +// `InsertQuorumCertificate` must be synchronized by the higher-logic. Currently, we have the +// lockctx.Proof to prove the higher logic is holding the [storage.LockInsertBlock] when +// inserting the QC after checking that no QC is already stored. 
+// +// Expected error returns: +// - [storage.ErrAlreadyExists] if any QuorumCertificate certifying the same block already exists +func InsertQuorumCertificate(lctx lockctx.Proof, rw storage.ReaderBatchWriter, qc *flow.QuorumCertificate) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot insert quorum certificate without holding lock %s", storage.LockInsertBlock) + } + + key := MakePrefix(codeBlockIDToQuorumCertificate, qc.BlockID) + exist, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return fmt.Errorf("failed to check if quorum certificate exists for block %s: %w", qc.BlockID, err) + } + if exist { + return fmt.Errorf("quorum certificate for block %s already exists: %w", qc.BlockID, storage.ErrAlreadyExists) + } + + return UpsertByKey(rw.Writer(), key, qc) +} + +// RetrieveQuorumCertificate retrieves the QuorumCertificate for the specified block. +// For every block that has been certified, this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a certified block +func RetrieveQuorumCertificate(r storage.Reader, blockID flow.Identifier, qc *flow.QuorumCertificate) error { + return RetrieveByKey(r, MakePrefix(codeBlockIDToQuorumCertificate, blockID), qc) +} diff --git a/storage/operation/qcs_test.go b/storage/operation/qcs_test.go new file mode 100644 index 00000000000..2dbc0c57343 --- /dev/null +++ b/storage/operation/qcs_test.go @@ -0,0 +1,54 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInsertQuorumCertificate(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := unittest.QuorumCertificateFixture() + lockManager := storage.NewTestingLockManager() + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertQuorumCertificate(lctx, rw, expected) + }) + require.NoError(t, err) + + // While still holding the lock, get value; this verifies that reads are not blocked by acquired locks + var actual flow.QuorumCertificate + err = operation.RetrieveQuorumCertificate(db.Reader(), expected.BlockID, &actual) + require.NoError(t, err) + assert.Equal(t, expected, &actual) + lctx.Release() + + // create a different QC for the same block + different := unittest.QuorumCertificateFixture() + different.BlockID = expected.BlockID + + // verify that overwriting the prior QC fails with `storage.ErrAlreadyExists` + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertQuorumCertificate(lctx2, rw, different) + }) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + lctx2.Release() + + // verify that the original QC is still there + err = operation.RetrieveQuorumCertificate(db.Reader(), expected.BlockID, &actual) + require.NoError(t, err) + assert.Equal(t, expected, &actual) + }) +} diff --git a/storage/operation/reads.go b/storage/operation/reads.go index ca9cabfcfe0..d1c8b743d21 100644 ---
a/storage/operation/reads.go +++ b/storage/operation/reads.go @@ -13,27 +13,11 @@ import ( "github.com/onflow/flow-go/utils/merr" ) -// CheckFunc is a function that checks if the value should be read and decoded. -// return (true, nil) to read the value and pass it to the CreateFunc and HandleFunc for decoding -// return (false, nil) to skip reading the value -// return (false, err) if running into any exception, the iteration should be stopped. -// when making a CheckFunc to be used in the IterationFunc to iterate over the keys, a sentinel error -// can be defined and checked to stop the iteration early, such as finding the first key that match -// certain condition. -// Note: the returned bool is to decide whether to read the value or not, rather than whether to stop -// the iteration or not. -type CheckFunc func(key []byte) (bool, error) - -// CreateFunc returns a pointer to an initialized entity that we can potentially -// decode the next value into during a badger DB iteration. -type CreateFunc func() interface{} - -// HandleFunc is a function that starts the processing of the current key-value -// pair during a badger iteration. It should be called after the key was checked -// and the entity was decoded. -// No errors are expected during normal operation. Any errors will halt the iteration. -type HandleFunc func() error -type IterationFunc func() (CheckFunc, CreateFunc, HandleFunc) +// IterationFunc is a callback function that will be called on each key-value pair during the iteration. +// The key is copied and passed to the function, so key can be modified or retained after iteration. +// The `getValue` function can be called to retrieve the value of the current key and decode value into destVal object. +// The caller can return (true, nil) to stop the iteration early. +type IterationFunc func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) // IterateKeysByPrefixRange will iterate over all entries in the database, where the key starts with a prefixes in // the range [startPrefix, endPrefix] (both inclusive). We require that startPrefix <= endPrefix (otherwise this @@ -41,15 +25,14 @@ type IterationFunc func() (CheckFunc, CreateFunc, HandleFunc) // In other words, error returned by the iteration functions will be propagated to the caller. // No errors expected during normal operations. func IterateKeysByPrefixRange(r storage.Reader, startPrefix []byte, endPrefix []byte, check func(key []byte) error) error { - return IterateKeys(r, startPrefix, endPrefix, func() (CheckFunc, CreateFunc, HandleFunc) { - return func(key []byte) (bool, error) { - err := check(key) - if err != nil { - return false, err - } - return false, nil - }, nil, nil - }, storage.IteratorOption{BadgerIterateKeyOnly: true}) + iterFunc := func(key []byte, getValue func(destVal any) error) (bail bool, err error) { + err = check(key) + if err != nil { + return true, err + } + return false, nil + } + return IterateKeys(r, startPrefix, endPrefix, iterFunc, storage.IteratorOption{BadgerIterateKeyOnly: true}) } // IterateKeys will iterate over all entries in the database, where the key starts with a prefixes in @@ -81,9 +64,6 @@ func IterateKeys(r storage.Reader, startPrefix []byte, endPrefix []byte, iterFun item := it.IterItem() key := item.Key() - // initialize processing functions for iteration - check, create, handle := iterFunc() - keyCopy := make([]byte, len(key)) // The underlying database may re-use and modify the backing memory of the returned key. 
@@ -91,41 +71,23 @@ func IterateKeys(r storage.Reader, startPrefix []byte, endPrefix []byte, iterFun copy(keyCopy, key) // check if we should process the item at all - shouldReadValue, err := check(keyCopy) + bail, err := iterFunc(keyCopy, func(destVal any) error { + return item.Value(func(val []byte) error { + return msgpack.Unmarshal(val, destVal) + }) + }) if err != nil { return err } - if !shouldReadValue { // skip reading value - continue - } - - err = item.Value(func(val []byte) error { - - // decode into the entity - entity := create() - err = msgpack.Unmarshal(val, entity) - if err != nil { - return irrecoverable.NewExceptionf("could not decode entity: %w", err) - } - - // process the entity - err = handle() - if err != nil { - return fmt.Errorf("could not handle entity: %w", err) - } - + if bail { return nil - }) - - if err != nil { - return fmt.Errorf("could not process value: %w", err) } } return nil } -// Traverse will iterate over all keys with the given prefix +// TraverseByPrefix will iterate over all keys with the given prefix // error returned by the iteration functions will be propagated to the caller. // No other errors are expected during normal operation. func TraverseByPrefix(r storage.Reader, prefix []byte, iterFunc IterationFunc, opt storage.IteratorOption) error { @@ -134,20 +96,12 @@ func TraverseByPrefix(r storage.Reader, prefix []byte, iterFunc IterationFunc, o // KeyOnlyIterateFunc returns an IterationFunc that only iterates over keys func KeyOnlyIterateFunc(fn func(key []byte) error) IterationFunc { - return func() (CheckFunc, CreateFunc, HandleFunc) { - checker := func(key []byte) (bool, error) { - return false, fn(key) - } - - create := func() interface{} { - return nil - } - - handle := func() error { - return nil + return func(key []byte, _ func(destVal any) error) (bail bool, err error) { + err = fn(key) + if err != nil { + return true, err } - - return checker, create, handle + return false, nil } } @@ -180,7 +134,7 @@ func KeyExists(r storage.Reader, key []byte) (exist bool, errToReturn error) { // - storage.ErrNotFound if the key does not exist in the database // - generic error in case of unexpected failure from the database layer, or failure // to decode an existing database value -func RetrieveByKey(r storage.Reader, key []byte, entity interface{}) (errToReturn error) { +func RetrieveByKey(r storage.Reader, key []byte, entity any) (errToReturn error) { val, closer, err := r.Get(key) if err != nil { return err @@ -201,7 +155,7 @@ func RetrieveByKey(r storage.Reader, key []byte, entity interface{}) (errToRetur // keys with the format prefix` + `height` (where "+" denotes concatenation of binary strings). The height // is encoded as Big-Endian (entries with numerically smaller height have lexicographically smaller key). // The function finds the *highest* key with the given prefix and height equal to or below the given height. 
-func FindHighestAtOrBelowByPrefix(r storage.Reader, prefix []byte, height uint64, entity interface{}) (errToReturn error) { +func FindHighestAtOrBelowByPrefix(r storage.Reader, prefix []byte, height uint64, entity any) (errToReturn error) { if len(prefix) == 0 { return fmt.Errorf("prefix must not be empty") } diff --git a/storage/operation/reads_bench_test.go b/storage/operation/reads_bench_test.go index 401fe53d217..7ad5a520be9 100644 --- a/storage/operation/reads_bench_test.go +++ b/storage/operation/reads_bench_test.go @@ -19,7 +19,7 @@ func BenchmarkRetrieve(t *testing.B) { for i := 0; i < t.N; i++ { var readBack Entity - require.NoError(t, operation.Retrieve(e.Key(), &readBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) } }) } @@ -34,8 +34,8 @@ func BenchmarkNonExist(t *testing.B) { t.ResetTimer() nonExist := Entity{ID: uint64(t.N + 1)} for i := 0; i < t.N; i++ { - var exists bool - require.NoError(t, operation.Exists(nonExist.Key(), &exists)(r)) + _, err := operation.KeyExists(r, nonExist.Key()) + require.NoError(t, err) } }) } @@ -55,9 +55,9 @@ func BenchmarkIterate(t *testing.B) { t.ResetTimer() var found [][]byte - require.NoError(t, operation.Iterate(prefix1, prefix2, func(key []byte) error { + require.NoError(t, operation.IterateKeysByPrefixRange(r, prefix1, prefix2, func(key []byte) error { found = append(found, key) return nil - })(r), "should iterate forward without error") + }), "should iterate forward without error") }) } diff --git a/storage/operation/reads_test.go b/storage/operation/reads_test.go index 5bd4336ce90..78a4d51024e 100644 --- a/storage/operation/reads_test.go +++ b/storage/operation/reads_test.go @@ -90,10 +90,10 @@ func TestIterateKeysByPrefixRange(t *testing.T) { // Forward iteration and check boundaries var found [][]byte - require.NoError(t, operation.Iterate(prefixStart, prefixEnd, func(key []byte) error { + require.NoError(t, operation.IterateKeysByPrefixRange(r, prefixStart, prefixEnd, func(key []byte) error { found = append(found, key) return nil - })(r), "should iterate forward without error") + }), "should iterate forward without error") require.ElementsMatch(t, keysInRange, found, "forward iteration should return the correct keys in range") }) } @@ -160,10 +160,10 @@ func TestIterateHierachicalPrefixes(t *testing.T) { {0x11, 0xff, 0xff}, } firstPrefixRangeActual := make([][]byte, 0) - err := operation.Iterate([]byte{0x10}, []byte{0x11}, func(key []byte) error { + err := operation.IterateKeysByPrefixRange(r, []byte{0x10}, []byte{0x11}, func(key []byte) error { firstPrefixRangeActual = append(firstPrefixRangeActual, key) return nil - })(r) + }) require.NoError(t, err, "iterate with range of first prefixes should not return an error") require.Equal(t, firstPrefixRangeExpected, firstPrefixRangeActual, "iterated values for range of first prefixes should match expected values") @@ -175,10 +175,10 @@ func TestIterateHierachicalPrefixes(t *testing.T) { {0x10, 0x21, 0x00}, {0x10, 0x21, 0xff}, } - err = operation.Iterate([]byte{0x10, 0x20}, []byte{0x10, 0x21}, func(key []byte) error { + err = operation.IterateKeysByPrefixRange(r, []byte{0x10, 0x20}, []byte{0x10, 0x21}, func(key []byte) error { secondPrefixRangeActual = append(secondPrefixRangeActual, key) return nil - })(r) + }) require.NoError(t, err, "iterate with range of second prefixes should not return an error") require.Equal(t, secondPrefixRangeExpected, secondPrefixRangeActual, "iterated values for range of second prefixes should match expected values") }) @@ -229,10 
+229,10 @@ func TestIterationBoundary(t *testing.T) { // Forward iteration and check boundaries var found [][]byte - require.NoError(t, operation.Iterate(prefixStart, prefixEnd, func(key []byte) error { + require.NoError(t, operation.IterateKeysByPrefixRange(r, prefixStart, prefixEnd, func(key []byte) error { found = append(found, key) return nil - })(r), "should iterate forward without error") + }), "should iterate forward without error") require.ElementsMatch(t, expectedKeys, found, "forward iteration should return the correct keys in range") }) } @@ -264,24 +264,22 @@ func TestTraverse(t *testing.T) { actual := make([]uint64, 0, len(keyVals)) // Define the iteration logic - iterationFunc := func() (operation.CheckFunc, operation.CreateFunc, operation.HandleFunc) { - check := func(key []byte) (bool, error) { - // Skip the key {0x42, 0x56} - return !bytes.Equal(key, []byte{0x42, 0x56}), nil + iterationFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + // Skip the key {0x42, 0x56} + if bytes.Equal(keyCopy, []byte{0x42, 0x56}) { + return false, nil } var val uint64 - create := func() interface{} { - return &val + err = getValue(&val) + if err != nil { + return true, err } - handle := func() error { - actual = append(actual, val) - return nil - } - return check, create, handle + actual = append(actual, val) + return false, nil } // Traverse the keys starting with prefix {0x42} - err := operation.Traverse([]byte{0x42}, iterationFunc, storage.DefaultIteratorOptions())(r) + err := operation.TraverseByPrefix(r, []byte{0x42}, iterationFunc, storage.DefaultIteratorOptions()) require.NoError(t, err, "traverse should not return an error") // Assert that the actual values match the expected values @@ -322,10 +320,10 @@ func TestTraverseKeyOnly(t *testing.T) { actual := make([][]byte, 0) // Traverse the keys starting with prefix {0x11} - err := operation.Traverse([]byte{0x10}, operation.KeyOnlyIterateFunc(func(key []byte) error { + err := operation.TraverseByPrefix(r, []byte{0x10}, func(key []byte, getValue func(destVal any) error) (bail bool, err error) { actual = append(actual, key) - return nil - }), storage.DefaultIteratorOptions())(r) + return false, nil + }, storage.DefaultIteratorOptions()) require.NoError(t, err, "traverse should not return an error") // Assert that the actual values match the expected values @@ -392,10 +390,11 @@ func TestFindHighestAtOrBelow(t *testing.T) { prefixToUse = []byte{} } - err := operation.FindHighestAtOrBelow( + err := operation.FindHighestAtOrBelowByPrefix( + r, prefixToUse, tt.height, - &entity)(r) + &entity) if tt.expectError { require.Error(t, err, fmt.Sprintf("expected error but got nil, entity: %v", entity)) diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go index e6941eb5fe0..c969d3b585e 100644 --- a/storage/operation/receipts.go +++ b/storage/operation/receipts.go @@ -5,58 +5,90 @@ import ( "github.com/onflow/flow-go/storage" ) -// InsertExecutionReceiptMeta inserts an execution receipt meta by ID. +// InsertExecutionReceiptMeta inserts a [flow.ExecutionReceiptMeta] into the database, keyed by its ID. +// +// CAUTION: The caller must ensure receiptID is a collision-resistant hash of the provided +// [flow.ExecutionReceiptMeta]! This method silently overrides existing data, which is safe only if +// for the same key, we always write the same value. +// +// No errors are expected during normal operation. 
func InsertExecutionReceiptMeta(w storage.Writer, receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) error { return UpsertByKey(w, MakePrefix(codeExecutionReceiptMeta, receiptID), meta) } -// RetrieveExecutionReceiptMeta retrieves a execution receipt meta by ID. +// RetrieveExecutionReceiptMeta retrieves a [flow.ExecutionReceiptMeta] by its ID. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no receipt meta with the specified ID is known. func RetrieveExecutionReceiptMeta(r storage.Reader, receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) error { return RetrieveByKey(r, MakePrefix(codeExecutionReceiptMeta, receiptID), meta) } -// IndexOwnExecutionReceipt inserts an execution receipt ID keyed by block ID +// IndexOwnExecutionReceipt indexes the Execution Node's OWN execution receipt by the executed block ID. +// +// CAUTION: +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The caller +// is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. func IndexOwnExecutionReceipt(w storage.Writer, blockID flow.Identifier, receiptID flow.Identifier) error { return UpsertByKey(w, MakePrefix(codeOwnBlockReceipt, blockID), receiptID) } -// LookupOwnExecutionReceipt finds execution receipt ID by block +// LookupOwnExecutionReceipt retrieves the Execution Node's OWN execution receipt ID for the specified block. +// Intended for Execution Node only. For every block executed by this node, this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node func LookupOwnExecutionReceipt(r storage.Reader, blockID flow.Identifier, receiptID *flow.Identifier) error { return RetrieveByKey(r, MakePrefix(codeOwnBlockReceipt, blockID), receiptID) } -// RemoveOwnExecutionReceipt removes own execution receipt index by blockID +// RemoveOwnExecutionReceipt removes the Execution Node's OWN execution receipt index for the given block ID. +// CAUTION: this is for recovery purposes only, and should not be used during normal operations! +// It returns nil if the receipt index does not exist. +// +// No errors are expected during normal operation. func RemoveOwnExecutionReceipt(w storage.Writer, blockID flow.Identifier) error { return RemoveByKey(w, MakePrefix(codeOwnBlockReceipt, blockID)) } -// IndexExecutionReceipts inserts an execution receipt ID keyed by block ID and receipt ID. -// one block could have multiple receipts, even if they are from the same executor +// IndexExecutionReceipts adds the given execution receipt to the set of all known receipts for the +// given block. It produces a mapping from block ID to the set of all known receipts for that block. +// One block could have multiple receipts, even if they are from the same executor. +// +// This method is idempotent, and can be called repeatedly with the same block ID and receipt ID, +// without the risk of data corruption.
+// +// No errors are expected during normal operation. func IndexExecutionReceipts(w storage.Writer, blockID, receiptID flow.Identifier) error { return UpsertByKey(w, MakePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) } -// LookupExecutionReceipts finds all execution receipts by block ID +// LookupExecutionReceipts retrieves the set of all execution receipts for the specified block. +// For every known block (at or above the root block height), this index should be populated +// with all known receipts for that block. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block func LookupExecutionReceipts(r storage.Reader, blockID flow.Identifier, receiptIDs *[]flow.Identifier) error { iterationFunc := receiptIterationFunc(receiptIDs) return TraverseByPrefix(r, MakePrefix(codeAllBlockReceipts, blockID), iterationFunc, storage.DefaultIteratorOptions()) } -// receiptIterationFunc returns an in iteration function which returns all receipt IDs found during traversal -func receiptIterationFunc(receiptIDs *[]flow.Identifier) func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(key []byte) (bool, error) { - return true, nil - } - - var receiptID flow.Identifier - create := func() interface{} { - return &receiptID - } - handle := func() error { +// receiptIterationFunc returns an iteration function which collects all receipt IDs found during traversal. +func receiptIterationFunc(receiptIDs *[]flow.Identifier) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var receiptID flow.Identifier + err = getValue(&receiptID) + if err != nil { + return true, err + } *receiptIDs = append(*receiptIDs, receiptID) - return nil - } - return func() (CheckFunc, CreateFunc, HandleFunc) { - return check, create, handle + return false, nil } } diff --git a/storage/operation/results.go b/storage/operation/results.go index ec7f3c6f4c3..29937653968 100644 --- a/storage/operation/results.go +++ b/storage/operation/results.go @@ -5,31 +5,59 @@ import ( "github.com/onflow/flow-go/storage" ) -// InsertExecutionResult inserts an execution result by ID. +// InsertExecutionResult inserts a [flow.ExecutionResult] into the storage, keyed by its ID. +// +// If the result already exists, it will be overwritten. Note that here, the key (result ID) is derived +// from the value (result) via a collision-resistant hash function. Hence, unchecked overwrites pose no risk +// of data corruption, because for the same key, we expect the same value. +// +// No errors are expected during normal operation. func InsertExecutionResult(w storage.Writer, result *flow.ExecutionResult) error { return UpsertByKey(w, MakePrefix(codeExecutionResult, result.ID()), result) } -// RetrieveExecutionResult retrieves a transaction by fingerprint. +// RetrieveExecutionResult retrieves an Execution Result by its ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no result with the specified `resultID` is known. func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result *flow.ExecutionResult) error { return RetrieveByKey(r, MakePrefix(codeExecutionResult, resultID), result) } -// IndexExecutionResult inserts an execution result ID keyed by block ID +// IndexExecutionResult indexes the Execution Node's OWN Execution Result by the executed block's ID. 
+// +// CAUTION: +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. +// +// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash) +// +// No errors are expected during normal operation. func IndexExecutionResult(w storage.Writer, blockID flow.Identifier, resultID flow.Identifier) error { return UpsertByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID) } -// LookupExecutionResult finds execution result ID by block +// LookupExecutionResult retrieves the Execution Node's OWN Execution Result ID for the specified block. +// Intended for Execution Node only. For every block executed by this node, this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node func LookupExecutionResult(r storage.Reader, blockID flow.Identifier, resultID *flow.Identifier) error { return RetrieveByKey(r, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID) } +// ExistExecutionResult checks if the execution node has its OWN Execution Result for the specified block. +// No errors are expected during normal operation. func ExistExecutionResult(r storage.Reader, blockID flow.Identifier) (bool, error) { return KeyExists(r, MakePrefix(codeIndexExecutionResultByBlock, blockID)) } -// RemoveExecutionResultIndex removes execution result indexed by the given blockID +// RemoveExecutionResultIndex removes Execution Node's OWN Execution Result for the given blockID. +// CAUTION: this is for recovery purposes only, and should not be used during normal operations! +// It returns nil if the index does not exist. +// No errors are expected during normal operation. func RemoveExecutionResultIndex(w storage.Writer, blockID flow.Identifier) error { return RemoveByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID)) } diff --git a/storage/operation/spork.go b/storage/operation/spork.go new file mode 100644 index 00000000000..1c1c625e377 --- /dev/null +++ b/storage/operation/spork.go @@ -0,0 +1,40 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertSporkID inserts the spork ID for the present spork. +// This value is inserted exactly once when bootstrapping the state and +// should always be present for a properly bootstrapped node. +// CAUTION: OVERWRITES existing data (potential for data corruption). +// +// No errors are expected during normal operation. +func InsertSporkID(w storage.Writer, sporkID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeSporkID), sporkID) +} +
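Because InsertSporkID (above) and InsertSporkRootBlockHeight (further below in this file) silently overwrite, the bootstrapping logic is effectively their only safe caller. Below is a minimal sketch of such a one-time bootstrap write, not part of this change set; the helper name `bootstrapSporkInfo` and the inputs `sporkID` and `sporkRootHeight` are assumed to come from the bootstrapping data.

```go
package operation_test

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation"
)

// bootstrapSporkInfo is a hypothetical bootstrapping step writing the spork metadata
// exactly once, in a single batch.
func bootstrapSporkInfo(db storage.DB, sporkID flow.Identifier, sporkRootHeight uint64) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		if err := operation.InsertSporkID(rw.Writer(), sporkID); err != nil {
			return fmt.Errorf("could not insert spork ID: %w", err)
		}
		if err := operation.InsertSporkRootBlockHeight(rw.Writer(), sporkRootHeight); err != nil {
			return fmt.Errorf("could not insert spork root block height: %w", err)
		}
		return nil
	})
}
```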
+// RetrieveSporkID retrieves the spork ID for the present spork. +// This value should always be present for a properly bootstrapped node. +// No errors are expected during normal operation. +func RetrieveSporkID(r storage.Reader, sporkID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeSporkID), sporkID) +} + +// InsertSporkRootBlockHeight inserts the spork root block height for the present spork. +// This value is inserted exactly once when bootstrapping the state and +// should always be present for a properly bootstrapped node. +// CAUTION: OVERWRITES existing data (potential for data corruption). +// +// No errors are expected during normal operation. +func InsertSporkRootBlockHeight(w storage.Writer, height uint64) error { + return UpsertByKey(w, MakePrefix(codeSporkRootBlockHeight), height) +} + +// RetrieveSporkRootBlockHeight retrieves the spork root block height for the present spork. +// This value should always be present for a properly bootstrapped node. +// No errors are expected during normal operation. +func RetrieveSporkRootBlockHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeSporkRootBlockHeight), height) +} diff --git a/storage/operation/spork_test.go b/storage/operation/spork_test.go new file mode 100644 index 00000000000..7e739764d93 --- /dev/null +++ b/storage/operation/spork_test.go @@ -0,0 +1,31 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSporkID_InsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + sporkID := unittest.IdentifierFixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertSporkID(rw.Writer(), sporkID) + }) + require.NoError(t, err) + + var actual flow.Identifier + err = operation.RetrieveSporkID(db.Reader(), &actual) + require.NoError(t, err) + + assert.Equal(t, sporkID, actual) + }) +} diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go index 174a4f321a9..a97197a5cde 100644 --- a/storage/operation/transaction_results.go +++ b/storage/operation/transaction_results.go @@ -27,19 +27,14 @@ func RetrieveTransactionResultByIndex(r storage.Reader, blockID flow.Identifier, // tx_index index. This correctly handles cases of duplicate transactions within block. func LookupTransactionResultsByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResults *[]flow.TransactionResult) error { - txErrIterFunc := func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(_ []byte) (bool, error) { - return true, nil - } + txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { var val flow.TransactionResult - create := func() interface{} { - return &val - } - handle := func() error { - *txResults = append(*txResults, val) - return nil + err = getValue(&val) + if err != nil { + return true, err } - return check, create, handle + *txResults = append(*txResults, val) + return false, nil } return TraverseByPrefix(r, MakePrefix(codeTransactionResultIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions()) @@ -94,19 +89,14 @@ func RetrieveLightTransactionResultByIndex(r storage.Reader, blockID flow.Identi // tx_index index. This correctly handles cases of duplicate transactions within block.
func LookupLightTransactionResultsByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResults *[]flow.LightTransactionResult) error { - txErrIterFunc := func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(_ []byte) (bool, error) { - return true, nil - } + txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { var val flow.LightTransactionResult - create := func() interface{} { - return &val + err = getValue(&val) + if err != nil { + return true, err } - handle := func() error { - *txResults = append(*txResults, val) - return nil - } - return check, create, handle + *txResults = append(*txResults, val) + return false, nil } return TraverseByPrefix(r, MakePrefix(codeLightTransactionResultIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions()) @@ -147,19 +137,14 @@ func TransactionResultErrorMessagesExists(r storage.Reader, blockID flow.Identif // LookupTransactionResultErrorMessagesByBlockIDUsingIndex retrieves all tx result error messages for a block, by using // tx_index index. This correctly handles cases of duplicate transactions within block. func LookupTransactionResultErrorMessagesByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResultErrorMessages *[]flow.TransactionResultErrorMessage) error { - txErrIterFunc := func() (CheckFunc, CreateFunc, HandleFunc) { - check := func(_ []byte) (bool, error) { - return true, nil - } + txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { var val flow.TransactionResultErrorMessage - create := func() interface{} { - return &val - } - handle := func() error { - *txResultErrorMessages = append(*txResultErrorMessages, val) - return nil + err = getValue(&val) + if err != nil { + return true, err } - return check, create, handle + *txResultErrorMessages = append(*txResultErrorMessages, val) + return false, nil } return TraverseByPrefix(r, MakePrefix(codeTransactionResultErrorMessageIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions()) diff --git a/storage/operation/views.go b/storage/operation/views.go index 6060d514c37..c45ffb23222 100644 --- a/storage/operation/views.go +++ b/storage/operation/views.go @@ -6,22 +6,40 @@ import ( "github.com/onflow/flow-go/storage" ) -// UpsertSafetyData inserts safety data into the database. +// UpsertSafetyData inserts or updates the given safety data for this node. +// Intended for consensus participants only (consensus and collector nodes). +// Here, `chainID` specifies which consensus instance specifically the node participates in. +// CAUTION: OVERWRITES existing data (potential for data corruption). +// +// No errors are expected during normal operation. func UpsertSafetyData(w storage.Writer, chainID flow.ChainID, safetyData *hotstuff.SafetyData) error { return UpsertByKey(w, MakePrefix(codeSafetyData, chainID), safetyData) } -// RetrieveSafetyData retrieves safety data from the database. +// RetrieveSafetyData retrieves the safety data for this node. +// Intended for consensus participants only (consensus and collector nodes). +// Here, `chainID` specifies which consensus instance specifically the node participates in. +// For consensus and collector nodes, this value should always exist (for the correct chainID). +// No errors are expected during normal operation. 
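The comments above state that, for consensus participants, the safety data must always exist for the node's chain ID, which makes storage.ErrNotFound an exception rather than a benign case for this reader. Below is a sketch of a startup read following that rule, not part of this change set; the helper name `recoverSafetyData` is hypothetical, and the `consensus/hotstuff` import path is an assumption based on the surrounding code's use of hotstuff.SafetyData.

```go
package operation_test

import (
	"github.com/onflow/flow-go/consensus/hotstuff" // assumed import path for hotstuff.SafetyData
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation"
)

// recoverSafetyData is a hypothetical startup step of a consensus participant. Because a properly
// bootstrapped participant must have persisted safety data, *any* error here (including
// storage.ErrNotFound) is escalated to an exception instead of being handled as benign.
func recoverSafetyData(r storage.Reader, chainID flow.ChainID) (*hotstuff.SafetyData, error) {
	var safety hotstuff.SafetyData
	err := operation.RetrieveSafetyData(r, chainID, &safety)
	if err != nil {
		return nil, irrecoverable.NewExceptionf("could not recover safety data for chain %s: %w", chainID, err)
	}
	return &safety, nil
}
```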
func RetrieveSafetyData(r storage.Reader, chainID flow.ChainID, safetyData *hotstuff.SafetyData) error { return RetrieveByKey(r, MakePrefix(codeSafetyData, chainID), safetyData) } -// UpsertLivenessData updates liveness data in the database. +// UpsertLivenessData inserts or updates the given liveness data for this node. +// Intended for consensus participants only (consensus and collector nodes). +// Here, `chainID` specifies which consensus instance specifically the node participates in. +// CAUTION: OVERWRITES existing data (potential for data corruption). +// +// No errors are expected during normal operation. func UpsertLivenessData(w storage.Writer, chainID flow.ChainID, livenessData *hotstuff.LivenessData) error { return UpsertByKey(w, MakePrefix(codeLivenessData, chainID), livenessData) } -// RetrieveLivenessData retrieves liveness data from the database. +// RetrieveLivenessData retrieves the liveness data for this node. +// Intended for consensus participants only (consensus and collector nodes). +// Here, `chainID` specifies which consensus instance specifically the node participates in. +// For consensus and collector nodes, this value should always exist (for the correct chainID). +// No errors are expected during normal operation. func RetrieveLivenessData(r storage.Reader, chainID flow.ChainID, livenessData *hotstuff.LivenessData) error { return RetrieveByKey(r, MakePrefix(codeLivenessData, chainID), livenessData) } diff --git a/storage/operation/writes.go b/storage/operation/writes.go index b46d45c706b..920cc232d3d 100644 --- a/storage/operation/writes.go +++ b/storage/operation/writes.go @@ -30,6 +30,24 @@ func UpsertByKey(w storage.Writer, key []byte, val interface{}) error { return nil } +// Upserting returns a functor, whose execution will append the given key-value-pair to the provided +// storage writer (typically a pending batch of database writes). +func Upserting(key []byte, val interface{}) func(storage.Writer) error { + value, err := msgpack.Marshal(val) + return func(w storage.Writer) error { + if err != nil { + return irrecoverable.NewExceptionf("failed to encode value: %w", err) + } + + err = w.Set(key, value) + if err != nil { + return irrecoverable.NewExceptionf("failed to store data: %w", err) + } + + return nil + } +} + // RemoveByKey removes the entity with the given key, if it exists. If it doesn't // exist, this is a no-op.
// Error returns: diff --git a/storage/operation/writes_test.go b/storage/operation/writes_test.go index 8b31071014b..c3538f4ee69 100644 --- a/storage/operation/writes_test.go +++ b/storage/operation/writes_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -26,20 +26,20 @@ func TestReadWrite(t *testing.T) { // Test read nothing should return not found var item Entity - err := operation.Retrieve(e.Key(), &item)(r) + err := operation.RetrieveByKey(r, e.Key(), &item) require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error") require.NoError(t, withWriter(operation.Upsert(e.Key(), e))) var readBack Entity - require.NoError(t, operation.Retrieve(e.Key(), &readBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) require.Equal(t, e, readBack, "expected retrieved value to match written value") // Test write again should overwrite newEntity := Entity{ID: 42} require.NoError(t, withWriter(operation.Upsert(e.Key(), newEntity))) - require.NoError(t, operation.Retrieve(e.Key(), &readBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) require.Equal(t, newEntity, readBack, "expected overwritten value to be retrieved") // Test write should not overwrite a different key @@ -47,7 +47,7 @@ func TestReadWrite(t *testing.T) { require.NoError(t, withWriter(operation.Upsert(anotherEntity.Key(), anotherEntity))) var anotherReadBack Entity - require.NoError(t, operation.Retrieve(anotherEntity.Key(), &anotherReadBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, anotherEntity.Key(), &anotherReadBack)) require.Equal(t, anotherEntity, anotherReadBack, "expected different key to return different value") }) } @@ -66,7 +66,9 @@ func TestReadWriteMalformed(t *testing.T) { // Test read should return decoding error var exists bool - require.NoError(t, operation.Exists(e.Key(), &exists)(r)) + var err error + exists, err = operation.KeyExists(r, e.Key()) + require.NoError(t, err) require.False(t, exists, "expected key to not exist") }) } @@ -94,7 +96,7 @@ func TestBatchWrite(t *testing.T) { // Verify that each entity can be read back for _, e := range entities { var readBack Entity - require.NoError(t, operation.Retrieve(e.Key(), &readBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) require.Equal(t, e, readBack, "expected retrieved value to match written value for entity ID %d", e.ID) } @@ -111,7 +113,7 @@ func TestBatchWrite(t *testing.T) { // Verify that each entity has been removed for _, e := range entities { var readBack Entity - err := operation.Retrieve(e.Key(), &readBack)(r) + err := operation.RetrieveByKey(r, e.Key(), &readBack) require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error for entity ID %d after removal", e.ID) } }) @@ -195,7 +197,9 @@ func TestRemove(t *testing.T) { e := Entity{ID: 1337} var exists bool - require.NoError(t, operation.Exists(e.Key(), &exists)(r)) + var err error + exists, err = operation.KeyExists(r, e.Key()) + require.NoError(t, err) require.False(t, exists, "expected key to not exist") // Test delete nothing should return OK @@ -204,54 +208,40 @@ func TestRemove(t *testing.T) { // Test write, delete, then read should return not found require.NoError(t, withWriter(operation.Upsert(e.Key(), e))) - require.NoError(t, operation.Exists(e.Key(), &exists)(r)) + exists, err = operation.KeyExists(r, e.Key()) + 
require.NoError(t, err) require.True(t, exists, "expected key to exist") require.NoError(t, withWriter(operation.Remove(e.Key()))) var item Entity - err := operation.Retrieve(e.Key(), &item)(r) + err = operation.RetrieveByKey(r, e.Key(), &item) require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error after delete") }) } func TestRemoveDiskUsage(t *testing.T) { - count := 10000 - wg := sync.WaitGroup{} - // 10000 chunk data packs will produce 4 log files - // Wait for the 4 log file to be deleted - wg.Add(4) - - // Create an event listener to monitor compaction events - listener := pebble.EventListener{ - // Capture when compaction ends - WALDeleted: func(info pebble.WALDeleteInfo) { - wg.Done() - }, - } + const count = 10000 - // Configure Pebble DB with the event listener opts := &pebble.Options{ - MemTableSize: 64 << 20, // required for rotating WAL - EventListener: &listener, + MemTableSize: 64 << 20, // required for rotating WAL } dbtest.RunWithPebbleDB(t, opts, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter, dir string, db *pebble.DB) { - items := make([]*flow.ChunkDataPack, count) - - // prefix is needed for defining the key range for compaction prefix := []byte{1} + endPrefix := []byte{2} getKey := func(c *flow.ChunkDataPack) []byte { return append(prefix, c.ChunkID[:]...) } + items := make([]*flow.ChunkDataPack, count) for i := 0; i < count; i++ { chunkID := unittest.IdentifierFixture() chunkDataPack := unittest.ChunkDataPackFixture(chunkID) items[i] = chunkDataPack } - // Insert 100 entities + // 1. Insert 10000 entities. require.NoError(t, withWriter(func(writer storage.Writer) error { for i := 0; i < count; i++ { if err := operation.Upsert(getKey(items[i]), items[i])(writer); err != nil { @@ -260,9 +250,16 @@ func TestRemoveDiskUsage(t *testing.T) { } return nil })) + + // 2. Flush and compact to get a stable state. + require.NoError(t, db.Flush()) + require.NoError(t, db.Compact(prefix, endPrefix, true)) + + // 3. Get sizeBefore. sizeBefore := getFolderSize(t, dir) + t.Logf("Size after initial write and compact: %d", sizeBefore) - // Remove all entities + // 4. Remove all entities require.NoError(t, withWriter(func(writer storage.Writer) error { for i := 0; i < count; i++ { if err := operation.Remove(getKey(items[i]))(writer); err != nil { @@ -272,29 +269,18 @@ func TestRemoveDiskUsage(t *testing.T) { return nil })) - // Trigger compaction - require.NoError(t, db.Compact(prefix, []byte{2}, true)) - - // Use a timer to implement a timeout for wg.Wait() - timeout := time.After(30 * time.Second) - done := make(chan struct{}) - - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - // WaitGroup finished successfully - case <-timeout: - t.Fatal("Test timed out waiting for WAL files to be deleted") - } - - // Verify the disk usage is reduced - sizeAfter := getFolderSize(t, dir) - require.Greater(t, sizeBefore, sizeAfter, - fmt.Sprintf("expected disk usage to be reduced after compaction, before: %d, after: %d", sizeBefore, sizeAfter)) + // 5. Flush and compact again. + require.NoError(t, db.Flush()) + require.NoError(t, db.Compact(prefix, endPrefix, true)) + + // 6. Verify the disk usage is reduced. + require.Eventually(t, func() bool { + sizeAfter := getFolderSize(t, dir) + t.Logf("Size after delete and compact: %d", sizeAfter) + return sizeAfter < sizeBefore + }, 30*time.Second, 200*time.Millisecond, + "expected disk usage to be reduced after compaction. 
before: %d, after: %d", + sizeBefore, getFolderSize(t, dir)) }) } @@ -313,7 +299,7 @@ func TestConcurrentWrite(t *testing.T) { require.NoError(t, withWriter(operation.Upsert(e.Key(), e))) var readBack Entity - require.NoError(t, operation.Retrieve(e.Key(), &readBack)(r)) + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) require.Equal(t, e, readBack, "expected retrieved value to match written value for key %d", i) }(i) } @@ -345,7 +331,7 @@ func TestConcurrentRemove(t *testing.T) { // Check that the item is no longer retrievable var item Entity - err := operation.Retrieve(e.Key(), &item)(r) + err := operation.RetrieveByKey(r, e.Key(), &item) require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error after delete for key %d", i) }(i) } @@ -394,7 +380,9 @@ func TestRemoveByPrefix(t *testing.T) { // Verify that the keys in the prefix range have been removed for i, key := range keys { var exists bool - require.NoError(t, operation.Exists(key, &exists)(r)) + var err error + exists, err = operation.KeyExists(r, key) + require.NoError(t, err) t.Logf("key %x exists: %t", key, exists) deleted := includeStart <= i && i <= includeEnd @@ -406,10 +394,10 @@ func TestRemoveByPrefix(t *testing.T) { // Verify that after the removal, Traverse the removed prefix would return nothing removedKeys := make([]string, 0) - err := operation.Traverse(prefix, operation.KeyOnlyIterateFunc(func(key []byte) error { + err := operation.TraverseByPrefix(r, prefix, func(key []byte, getValue func(destVal any) error) (bail bool, err error) { removedKeys = append(removedKeys, fmt.Sprintf("%x", key)) - return nil - }), storage.DefaultIteratorOptions())(r) + return false, nil + }, storage.DefaultIteratorOptions()) require.NoError(t, err) require.Len(t, removedKeys, 0, "expected no entries to be found when traversing the removed prefix") @@ -421,10 +409,10 @@ func TestRemoveByPrefix(t *testing.T) { } actual := make([][]byte, 0) - err = operation.Iterate([]byte{keys[0][0]}, storage.PrefixUpperBound(keys[len(keys)-1]), func(key []byte) error { + err = operation.IterateKeysByPrefixRange(r, []byte{keys[0][0]}, storage.PrefixUpperBound(keys[len(keys)-1]), func(key []byte) error { actual = append(actual, key) return nil - })(r) + }) require.NoError(t, err) require.Equal(t, expected, actual, "expected keys to match expected values") }) @@ -471,7 +459,9 @@ func TestRemoveByRange(t *testing.T) { // Verify that the keys in the prefix range have been removed for i, key := range keys { var exists bool - require.NoError(t, operation.Exists(key, &exists)(r)) + var err error + exists, err = operation.KeyExists(r, key) + require.NoError(t, err) t.Logf("key %x exists: %t", key, exists) deleted := includeStart <= i && i <= includeEnd @@ -518,7 +508,9 @@ func TestRemoveFrom(t *testing.T) { // Verify that the keys in the prefix range have been removed for i, key := range keys { var exists bool - require.NoError(t, operation.Exists(key, &exists)(r)) + var err error + exists, err = operation.KeyExists(r, key) + require.NoError(t, err) t.Logf("key %x exists: %t", key, exists) deleted := includeStart <= i && i <= includeEnd diff --git a/storage/operations.go b/storage/operations.go index d1ccbaf9493..06e92f24aff 100644 --- a/storage/operations.go +++ b/storage/operations.go @@ -2,7 +2,6 @@ package storage import ( "io" - "sync" ) // Iterator is an interface for iterating over key-value pairs in a storage backend. 
@@ -62,6 +61,7 @@ type IteratorOption struct { BadgerIterateKeyOnly bool // default false } +// TODO: convert into a var func DefaultIteratorOptions() IteratorOption { return IteratorOption{ // only needed for badger. ignored by pebble @@ -143,14 +143,6 @@ type ReaderBatchWriter interface { // - The writer cannot be used concurrently for writing. Writer() Writer - // Lock tries to acquire the lock for the batch. - // if the lock is already acquired by this same batch from other pending db operations, - // then it will not be blocked and can continue updating the batch, which prevents a re-entrant deadlock. - // Note the ReaderBatchWriter is not concurrent-safe, so the caller must ensure that - // the batch is not used concurrently by multiple goroutines. - // CAUTION: The caller must ensure that no other references exist for the input lock. - Lock(*sync.Mutex) - // AddCallback adds a callback to execute after the batch has been flush // regardless the batch update is succeeded or failed. // The error parameter is the error returned by the batch update. @@ -170,20 +162,27 @@ type DB interface { // NewBatch create a new batch for writing. NewBatch() Batch + + // Close closes the database and releases all resources. + // No errors are expected during normal operation. + Close() error } // Batch is an interface for a batch of writes to a storage backend. -// The batch is pending until it is committed. -// Useful for dynamically adding writes to the batch +// The batch is pending until it is committed. Useful for dynamically adding writes to the batch. type Batch interface { ReaderBatchWriter // Commit applies the batched updates to the database. + // Commit may be called at most once per Batch. + // No errors are expected during normal operation. Commit() error // Close releases memory of the batch. + // Close must be called exactly once per Batch. // This can be called as a defer statement immediately after creating Batch // to reduce risk of unbounded memory consumption. + // No errors are expected during normal operation. Close() error } @@ -196,6 +195,12 @@ func OnlyWriter(fn func(Writer) error) func(ReaderBatchWriter) error { } // OnCommitSucceed adds a callback to execute after the batch has been successfully committed. +// +// Context on why we don't add this method to the ReaderBatchWriter: +// Because the implementation of the ReaderBatchWriter interface would have to provide an implementation +// for AddSuccessCallback, which can be derived for free from the AddCallback method. +// It's better avoid using AddCallback directly and use OnCommitSucceed instead, +// because you might write `if err != nil` by mistake, which is a golang idiom for error handling func OnCommitSucceed(b ReaderBatchWriter, onSuccessFn func()) { b.AddCallback(func(err error) { if err == nil { diff --git a/storage/payloads.go b/storage/payloads.go index 0094c6a2ad9..fbb9c2d7883 100644 --- a/storage/payloads.go +++ b/storage/payloads.go @@ -7,9 +7,6 @@ import ( // Payloads represents persistent storage for payloads. type Payloads interface { - // Store will store a payload and index its contents. - Store(blockID flow.Identifier, payload *flow.Payload) error - // ByBlockID returns the payload with the given hash. It is available for // finalized and ambiguous blocks. 
ByBlockID(blockID flow.Identifier) (*flow.Payload, error) diff --git a/storage/pebble/batch.go b/storage/pebble/batch.go index 9a45e55bc02..2bba07cb9b0 100644 --- a/storage/pebble/batch.go +++ b/storage/pebble/batch.go @@ -3,9 +3,10 @@ package pebble import ( "sync" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" ) +// TODO: unused? type Batch struct { writer *pebble.Batch diff --git a/storage/pebble/bootstrap.go b/storage/pebble/bootstrap.go index 0ccacd963ce..e70b1fec389 100644 --- a/storage/pebble/bootstrap.go +++ b/storage/pebble/bootstrap.go @@ -7,7 +7,7 @@ import ( "path/filepath" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" diff --git a/storage/pebble/bootstrap_test.go b/storage/pebble/bootstrap_test.go index 6412c7320ec..4225b5e5d47 100644 --- a/storage/pebble/bootstrap_test.go +++ b/storage/pebble/bootstrap_test.go @@ -9,7 +9,7 @@ import ( "path" "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/stretchr/testify/require" diff --git a/storage/pebble/config.go b/storage/pebble/config.go index 99819454de1..06e8497d50d 100644 --- a/storage/pebble/config.go +++ b/storage/pebble/config.go @@ -1,8 +1,8 @@ package pebble import ( - "github.com/cockroachdb/pebble" - "github.com/cockroachdb/pebble/bloom" + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/bloom" "github.com/rs/zerolog" "github.com/onflow/flow-go/storage/util" @@ -14,7 +14,7 @@ func DefaultPebbleOptions(logger zerolog.Logger, cache *pebble.Cache, comparer * opts := &pebble.Options{ Cache: cache, Comparer: comparer, - FormatMajorVersion: pebble.FormatNewest, + FormatMajorVersion: pebble.FormatVirtualSSTables, // Soft and hard limits on read amplificaction of L0 respectfully. 
L0CompactionThreshold: 2, diff --git a/storage/pebble/open.go b/storage/pebble/open.go index 4f804a35306..05fefc3d246 100644 --- a/storage/pebble/open.go +++ b/storage/pebble/open.go @@ -6,10 +6,11 @@ import ( "os" "path/filepath" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" + "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/pebble/registers" ) @@ -38,7 +39,7 @@ func NewBootstrappedRegistersWithPath(logger zerolog.Logger, dir string) (*Regis } // OpenRegisterPebbleDB opens the database -// The difference between OpenDefaultPebbleDB is that it uses +// The difference between openDefaultPebbleDB is that it uses // a customized comparer (NewMVCCComparer) which is needed to // implement finding register values at any given height using // pebble's SeekPrefixGE function @@ -55,11 +56,9 @@ func OpenRegisterPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) return db, nil } -// OpenDefaultPebbleDB opens a pebble database using default options, +// openDefaultPebbleDB opens a pebble database using default options, // such as cache size and comparer -// If the pebbleDB is not bootstrapped at this folder, it will auto-bootstrap it, -// use MustOpenDefaultPebbleDB if you want to return error instead -func OpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) { +func openDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) { cache := pebble.NewCache(DefaultPebbleCacheSize) defer cache.Unref() opts := DefaultPebbleOptions(logger, cache, pebble.DefaultComparer) @@ -71,43 +70,76 @@ func OpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) return db, nil } -// MustOpenDefaultPebbleDB returns error if the pebbleDB is not bootstrapped at this folder +// ShouldOpenDefaultPebbleDB returns error if the pebbleDB is not bootstrapped at this folder // if bootstrapped, then open the pebbleDB -func MustOpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) { - err := IsPebbleInitialized(dir) - if err != nil { +func ShouldOpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) { + ok, err := IsPebbleFolder(dir) + if err != nil || !ok { return nil, fmt.Errorf("pebble db is not initialized: %w", err) } - return OpenDefaultPebbleDB(logger, dir) + return SafeOpen(logger, dir) } -// IsPebbleInitialized checks if the given folder contains a valid Pebble DB. +// SafeOpen open a pebble database at the given directory. +// It opens the database only if the directory: +// 1. does not exist, then it will create this directory +// 2. is empty +// 3. was opened before, in which case have all pebble required files +// It returns an error if the directory is not empty and missing required pebble files. +// more specifically, if the folder is a badger folder, it will return an error because it would +// miss some pebble file. 
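To make the intended call pattern concrete (mirroring the tests further down), here is a short, illustrative sketch. The helper name, the import alias `pebblestorage` for this package (alongside `pebble` for github.com/cockroachdb/pebble/v2), and the error handling are assumptions rather than part of this change:

```go
// openOrBootstrapPebble is a hypothetical caller, shown only to illustrate the SafeOpen contract.
func openOrBootstrapPebble(logger zerolog.Logger, dir string) (*pebble.DB, error) {
	// SafeOpen accepts a directory that is empty, does not exist yet (it will be created),
	// or already contains the required Pebble files. A directory holding foreign data,
	// such as a Badger DB, is rejected with an error.
	db, err := pebblestorage.SafeOpen(logger, dir)
	if err != nil {
		return nil, fmt.Errorf("could not open pebble db at %s: %w", dir, err)
	}
	return db, nil
}
```

Where a caller must not accidentally bootstrap a fresh database, `ShouldOpenDefaultPebbleDB` is the stricter variant: it errors unless the directory already contains a bootstrapped Pebble DB.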
+func SafeOpen(logger zerolog.Logger, dataDir string) (*pebble.DB, error) { + ok, err := util.IsEmptyOrNotExists(dataDir) + if err != nil { + return nil, fmt.Errorf("error checking if folder is empty or does not exist: %w", err) + } + + // if the folder is empty or does not exist, then it can be used as a Pebble folder + if ok { + return openDefaultPebbleDB(logger, dataDir) + } + + // note, a badger folder does not have MANIFEST-* file, so this will return error + // and prevent opening a badger folder as a pebble folder + ok, err = folderHaveAllPebbleFiles(dataDir) + if err != nil || !ok { + return nil, fmt.Errorf("folder %s is not a valid pebble folder: %w", dataDir, err) + } + + return openDefaultPebbleDB(logger, dataDir) +} + +// IsPebbleFolder checks if the given folder contains a valid Pebble DB. // return error if the folder does not exist, is not a directory, or is missing required files // return nil if the folder contains a valid Pebble DB -func IsPebbleInitialized(folderPath string) error { +func IsPebbleFolder(folderPath string) (bool, error) { // Check if the folder exists info, err := os.Stat(folderPath) if os.IsNotExist(err) { - return fmt.Errorf("directory does not exist: %s", folderPath) + return false, fmt.Errorf("directory does not exist: %s", folderPath) } if !info.IsDir() { - return fmt.Errorf("not a directory: %s", folderPath) + return false, fmt.Errorf("not a directory: %s", folderPath) } + return folderHaveAllPebbleFiles(folderPath) +} + +func folderHaveAllPebbleFiles(folderPath string) (bool, error) { // Look for Pebble-specific files requiredFiles := []string{"MANIFEST-*"} for _, pattern := range requiredFiles { matches, err := filepath.Glob(filepath.Join(folderPath, pattern)) if err != nil { - return fmt.Errorf("error checking for files: %v", err) + return false, fmt.Errorf("error checking for files: %v", err) } if len(matches) == 0 { - return fmt.Errorf("missing required file: %s", pattern) + return false, fmt.Errorf("missing required file: %s", pattern) } } - return nil + return true, nil } // ReadHeightsFromBootstrappedDB reads the first and latest height from a bootstrapped register db diff --git a/storage/pebble/open_test.go b/storage/pebble/open_test.go index e941ba2ab46..2a19c506d75 100644 --- a/storage/pebble/open_test.go +++ b/storage/pebble/open_test.go @@ -77,24 +77,59 @@ func TestNewBootstrappedRegistersWithPath(t *testing.T) { }) } -func TestMustOpenDefaultPebbleDB(t *testing.T) { +func TestSafeOpen(t *testing.T) { t.Parallel() unittest.RunWithTempDir(t, func(dir string) { logger := unittest.Logger() - // verify error is returned when the db is not bootstrapped - _, err := MustOpenDefaultPebbleDB(logger, dir) + // create an empty folder + pebbleDB, err := SafeOpen(logger, dir) + require.NoError(t, err) + require.NoError(t, pebbleDB.Close()) + + // can be opened again + db, err := SafeOpen(logger, dir) + require.NoError(t, err) + require.NoError(t, db.Close()) + }) +} + +func TestSafeOpenFailIfDirIsUsedByBadgerDB(t *testing.T) { + t.Parallel() + unittest.RunWithTempDir(t, func(dir string) { + logger := unittest.Logger() + // create a badger db + badgerDB := unittest.BadgerDB(t, dir) + require.NoError(t, badgerDB.Close()) + + _, err := SafeOpen(logger, dir) + require.Error(t, err) + require.Contains(t, err.Error(), "is not a valid pebble folder") + }) +} + +func TestShouldOpenDefaultPebbleDB(t *testing.T) { + t.Parallel() + unittest.RunWithTempDir(t, func(dir string) { + logger := unittest.Logger() + // verify error if directy not exist + _, err := 
ShouldOpenDefaultPebbleDB(logger, dir+"/not-exist") + require.Error(t, err) + require.Contains(t, err.Error(), "not initialized") + + // verify error if directory exist but not empty + _, err = ShouldOpenDefaultPebbleDB(logger, dir) require.Error(t, err) require.Contains(t, err.Error(), "not initialized") // bootstrap the db - db, err := OpenDefaultPebbleDB(logger, dir) + db, err := SafeOpen(logger, dir) require.NoError(t, err) require.NoError(t, initHeights(db, uint64(10))) require.NoError(t, db.Close()) fmt.Println(dir) // verify no error is returned when the db is bootstrapped - db, err = MustOpenDefaultPebbleDB(logger, dir) + db, err = ShouldOpenDefaultPebbleDB(logger, dir) require.NoError(t, err) h, err := latestStoredHeight(db) @@ -103,3 +138,17 @@ func TestMustOpenDefaultPebbleDB(t *testing.T) { require.NoError(t, db.Close()) }) } + +func TestShouldOpenDefaultPebbleDBFailWhenOpeningBadgerDBDir(t *testing.T) { + t.Parallel() + unittest.RunWithTempDir(t, func(dir string) { + logger := unittest.Logger() + // create a badger db + badgerDB := unittest.BadgerDB(t, dir) + require.NoError(t, badgerDB.Close()) + + _, err := ShouldOpenDefaultPebbleDB(logger, dir) + require.Error(t, err) + require.Contains(t, err.Error(), "pebble db is not initialized") + }) +} diff --git a/storage/pebble/registers.go b/storage/pebble/registers.go index 2f5c78e8ff1..aa08a19c77d 100644 --- a/storage/pebble/registers.go +++ b/storage/pebble/registers.go @@ -5,7 +5,7 @@ import ( "fmt" "math" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/pkg/errors" "go.uber.org/atomic" diff --git a/storage/pebble/registers/comparer.go b/storage/pebble/registers/comparer.go index 5997b8d0672..973bd9fe8ab 100644 --- a/storage/pebble/registers/comparer.go +++ b/storage/pebble/registers/comparer.go @@ -1,6 +1,6 @@ package registers -import "github.com/cockroachdb/pebble" +import "github.com/cockroachdb/pebble/v2" const ( // Size of the block height encoded in the key. diff --git a/storage/pebble/registers_test.go b/storage/pebble/registers_test.go index 8e85a32bfca..58899e72084 100644 --- a/storage/pebble/registers_test.go +++ b/storage/pebble/registers_test.go @@ -9,7 +9,7 @@ import ( "strconv" "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/storage/pebble/testutil.go b/storage/pebble/testutil.go index 94b0b0dfa10..6bce658bd44 100644 --- a/storage/pebble/testutil.go +++ b/storage/pebble/testutil.go @@ -3,7 +3,7 @@ package pebble import ( "testing" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/utils/unittest" diff --git a/storage/procedure/children.go b/storage/procedure/children.go new file mode 100644 index 00000000000..57072489fa2 --- /dev/null +++ b/storage/procedure/children.go @@ -0,0 +1,91 @@ +package procedure + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// IndexNewBlock will add parent-child index for the new block. +// - Each block has a parent, we use this parent-child relationship to build a reverse index +// for looking up children blocks for a given block. This is useful for forks recovery +// where we want to find all the pending children blocks for the lastest finalized block. 
+// +// When adding parent-child index for a new block, we will update two indexes: +// 1. Per protocol convention, blocks must be ingested in "ancestors-first" order. Hence, +// if a block is new (which must be verified, to avoid state corruption in case of repeated +// calls), its set of persisted children is empty at the time of insertion. +// 2. Since the parent block has this new block as a child, we add the new block to the parent block's children. +// There are two special cases for (2): +// - If the parent block is zero (i.e. genesis block), then we don't need to add this index. +// - If the parent block doesn't exist, then we will index the new block as the only child +// of the parent anyway. This is useful for bootstrapping nodes with truncated history. +func IndexNewBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + return insertNewBlock(lctx, rw, blockID, parentID) +} + +// IndexNewClusterBlock is the cluster-consensus counterpart of IndexNewBlock; it requires the +// [storage.LockInsertOrFinalizeClusterBlock] lock to be held by the caller. +func IndexNewClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + return insertNewBlock(lctx, rw, blockID, parentID) +} + +func insertNewBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error { + // Step 1: index the children for the new block. + // the new block has no children yet, so we add an empty children index for it + err := operation.UpsertBlockChildren(lctx, rw.Writer(), blockID, nil) + if err != nil { + return fmt.Errorf("could not insert empty block children: %w", err) + } + + // Step 2: adding the second index for the parent block + // if the parent block is zero (for instance, the root block has no parent), + // then no need to add index for it + if parentID == flow.ZeroID { + return nil + } + + // if the parent block is not zero, depending on whether the parent block has + // children or not, we will either update the index or insert the index: + // when the parent block doesn't exist, we will insert the block children; + // when the parent block exists already, we will update the block children. + var childrenIDs flow.IdentifierList + err = operation.RetrieveBlockChildren(rw.GlobalReader(), parentID, &childrenIDs) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("could not look up block children: %w", err) + } + + // check we don't add a duplicate + for _, dupID := range childrenIDs { + if blockID == dupID { + return storage.ErrAlreadyExists + } + } + + // adding the new block to be another child of the parent + childrenIDs = append(childrenIDs, blockID) + + // saving the index + err = operation.UpsertBlockChildren(lctx, rw.Writer(), parentID, childrenIDs) + if err != nil { + return fmt.Errorf("could not update children index: %w", err) + } + + return nil +} + +// LookupBlockChildren looks up the IDs of all child blocks of the given parent block.
+func LookupBlockChildren(r storage.Reader, blockID flow.Identifier, childrenIDs *flow.IdentifierList) error { + return operation.RetrieveBlockChildren(r, blockID, childrenIDs) +} diff --git a/storage/procedure/children_test.go b/storage/procedure/children_test.go new file mode 100644 index 00000000000..8ae2cb8b54b --- /dev/null +++ b/storage/procedure/children_test.go @@ -0,0 +1,148 @@ +package procedure_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/utils/unittest" +) + +// after indexing a block by its parent, we should be able to retrieve the child block by the parentID +func TestIndexAndLookupChild(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + parentID := unittest.IdentifierFixture() + childID := unittest.IdentifierFixture() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, childID, parentID) + }) + require.NoError(t, err) + + // retrieve child + var retrievedIDs flow.IdentifierList + err = procedure.LookupBlockChildren(db.Reader(), parentID, &retrievedIDs) + require.NoError(t, err) + + // retrieved child should be the stored child + require.Equal(t, flow.IdentifierList{childID}, retrievedIDs) + }) +} + +// if two blocks connect to the same parent, indexing the second block appends it to the +// parent's children; retrieving the children of the parent block returns both indexed +// blocks in insertion order.
+func TestIndexTwiceAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + parentID := unittest.IdentifierFixture() + child1ID := unittest.IdentifierFixture() + child2ID := unittest.IdentifierFixture() + + // index the first child + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, child1ID, parentID) + }) + require.NoError(t, err) + + // index the second child + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, child2ID, parentID) + }) + require.NoError(t, err) + + var retrievedIDs flow.IdentifierList + err = procedure.LookupBlockChildren(db.Reader(), parentID, &retrievedIDs) + require.NoError(t, err) + + require.Equal(t, flow.IdentifierList{child1ID, child2ID}, retrievedIDs) + }) +} + +// if parent is zero, then we don't index it +func TestIndexZeroParent(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + childID := unittest.IdentifierFixture() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, childID, flow.ZeroID) + }) + require.NoError(t, err) + + // zero id should have no children + var retrievedIDs flow.IdentifierList + err = procedure.LookupBlockChildren(db.Reader(), flow.ZeroID, &retrievedIDs) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// lookup block children will only return direct childrens +func TestDirectChildren(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + b1 := unittest.IdentifierFixture() + b2 := unittest.IdentifierFixture() + b3 := unittest.IdentifierFixture() + b4 := unittest.IdentifierFixture() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, b2, b1) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, b3, b2) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.IndexNewBlock(lctx, rw, b4, b3) + }) + require.NoError(t, err) + + // check the children of the first block + var retrievedIDs flow.IdentifierList + + err = procedure.LookupBlockChildren(db.Reader(), b1, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b2}, retrievedIDs) + + err = procedure.LookupBlockChildren(db.Reader(), b2, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b3}, retrievedIDs) + + err = procedure.LookupBlockChildren(db.Reader(), b3, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b4}, retrievedIDs) + + err = procedure.LookupBlockChildren(db.Reader(), b4, &retrievedIDs) + require.NoError(t, err) + require.Nil(t, retrievedIDs) + }) +} diff --git a/storage/procedure/cluster.go b/storage/procedure/cluster.go new file 
mode 100644 index 00000000000..04bebb021bb --- /dev/null +++ b/storage/procedure/cluster.go @@ -0,0 +1,260 @@ +package procedure + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// This file implements storage functions for blocks in cluster consensus. + +// InsertClusterBlock inserts a cluster consensus block, updating all associated indexes. +// +// CAUTION: +// - The caller must acquire the lock [storage.LockInsertOrFinalizeClusterBlock] and hold it +// until the database write has been committed. This lock allows `InsertClusterBlock` to verify +// that this block has not yet been indexed. In order to protect against accidental mutation +// of existing data, this read and subsequent writes must be performed as one atomic operation. +// Hence, the requirement to hold the lock until the write is committed. +// +// We return [storage.ErrAlreadyExists] if the block has already been persisted before, i.e. we only +// insert a block once. This error allows the caller to detect duplicate inserts. +// No other errors are expected during normal operation. +func InsertClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, block *cluster.Block) error { + // We need to enforce that each cluster block is inserted and indexed exactly once (no overwriting allowed): + // 1. We check that the lock [storage.LockInsertOrFinalizeClusterBlock] for cluster block insertion is held. + // 2. When calling `operation.InsertHeader`, we append the storage operations for inserting the header to the + // provided write batch. Note that `operation.InsertHeader` checks whether the header already exists, + // returning [storage.ErrAlreadyExists] if so. + // 3. We append all other storage indexing operations to the same write batch, without additional existence + // checks. This is safe, because this is the only place where these indexes are created, and we always + // store the block header first alongside the indices in one atomic batch. Hence, since we know from step 2 + // that the header did not exist before, we also know that none of the other indexes existed before either + // 4. We require that the caller holds the lock until the write batch has been committed. + // Thereby, we guarantee that no other thread can write data about the same block concurrently. + // When these constraints are met, we know that no overwrites occurred because `InsertHeader` + // includes guarantees that the key `blockID` has not yet been used before. + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { // 1. check lock + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + // Here the key `blockID` is derived from the `block` via a collision-resistant hash function. + // Hence, two different blocks having the same key is practically impossible. + blockID := block.ID() + // 2. 
Store the block header; errors with [storage.ErrAlreadyExists] if some entry for `blockID` already exists + err := operation.InsertHeader(lctx, rw, blockID, block.Header) + if err != nil { + return fmt.Errorf("could not insert cluster block header: %w", err) + } + + // insert the block payload; without further overwrite checks (see above for explanation) + err = InsertClusterPayload(lctx, rw, blockID, block.Payload) + if err != nil { + return fmt.Errorf("could not insert cluster block payload: %w", err) + } + + // index the child block for recovery; without further overwrite checks (see above for explanation) + err = IndexNewClusterBlock(lctx, rw, blockID, block.Header.ParentID) + if err != nil { + return fmt.Errorf("could not index new cluster block: %w", err) + } + return nil +} + +// RetrieveClusterBlock retrieves a cluster consensus block by block ID. +func RetrieveClusterBlock(r storage.Reader, blockID flow.Identifier, block *cluster.Block) error { + // retrieve the block header + var header flow.Header + err := operation.RetrieveHeader(r, blockID, &header) + if err != nil { + return fmt.Errorf("could not retrieve cluster block header: %w", err) + } + + // retrieve payload + var payload cluster.Payload + err = RetrieveClusterPayload(r, blockID, &payload) + if err != nil { + return fmt.Errorf("could not retrieve cluster block payload: %w", err) + } + + // overwrite block + *block = cluster.Block{ + Header: &header, + Payload: &payload, + } + return nil +} + +// RetrieveLatestFinalizedClusterHeader retrieves the latest finalized cluster block header from the specified cluster. +func RetrieveLatestFinalizedClusterHeader(r storage.Reader, clusterID flow.ChainID, final *flow.Header) error { + var latestFinalizedHeight uint64 + err := operation.RetrieveClusterFinalizedHeight(r, clusterID, &latestFinalizedHeight) + if err != nil { + return fmt.Errorf("could not retrieve latest finalized cluster block height: %w", err) + } + + var finalID flow.Identifier + err = operation.LookupClusterBlockHeight(r, clusterID, latestFinalizedHeight, &finalID) + if err != nil { + return fmt.Errorf("could not retrieve ID of latest finalized cluster block: %w", err) + } + + err = operation.RetrieveHeader(r, finalID, final) + if err != nil { + return fmt.Errorf("could not retrieve header of latest finalized cluster block: %w", err) + } + return nil +} + +// FinalizeClusterBlock finalizes a block in cluster consensus. The caller must hold the +// [storage.LockInsertOrFinalizeClusterBlock] lock until the write batch has been committed; +// the block's parent must be the latest finalized cluster block.
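Before turning to the implementation of `FinalizeClusterBlock` below, a rough caller-side sketch of the insert-then-finalize flow (mirroring the tests in `cluster_test.go` further down) may help; the helper name, lock-manager wiring, and error handling are simplified and illustrative only:

```go
// insertAndFinalize is a hypothetical helper illustrating the locking contract only.
func insertAndFinalize(db storage.DB, lockManager lockctx.Manager, block *cluster.Block) error {
	lctx := lockManager.NewContext()
	if err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock); err != nil {
		return err
	}
	// the lock must be held until each write batch below has been committed
	defer lctx.Release()

	// insert header, payload and parent-child index as one atomic batch
	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return InsertClusterBlock(lctx, rw, block)
	})
	if err != nil {
		return fmt.Errorf("could not insert cluster block: %w", err)
	}

	// finalize the block; this only succeeds if its parent is the latest finalized cluster block
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return FinalizeClusterBlock(lctx, rw, block.ID())
	})
}
```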
+func FinalizeClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + r := rw.GlobalReader() + writer := rw.Writer() + // retrieve the header to check the parent + var header flow.Header + err := operation.RetrieveHeader(r, blockID, &header) + if err != nil { + return fmt.Errorf("could not retrieve header: %w", err) + } + + // get the chain ID, which determines which cluster state to query + clusterID := header.ChainID + + // retrieve the latest finalized cluster block height + var latestFinalizedHeight uint64 + err = operation.RetrieveClusterFinalizedHeight(r, clusterID, &latestFinalizedHeight) + if err != nil { + return fmt.Errorf("could not retrieve boundary: %w", err) + } + + // retrieve the ID of the latest finalized cluster block + var latestFinalizedBlockID flow.Identifier + err = operation.LookupClusterBlockHeight(r, clusterID, latestFinalizedHeight, &latestFinalizedBlockID) + if err != nil { + return fmt.Errorf("could not retrieve head: %w", err) + } + + // sanity check: the previously latest finalized block is the parent of the block we are now finalizing + if header.ParentID != latestFinalizedBlockID { + return fmt.Errorf("can't finalize non-child of chain head") + } + + // index the block by its height + err = operation.IndexClusterBlockHeight(lctx, writer, clusterID, header.Height, header.ID()) + if err != nil { + return fmt.Errorf("could not index cluster block height: %w", err) + } + + // update the finalized boundary + err = operation.UpsertClusterFinalizedHeight(lctx, writer, clusterID, header.Height) + if err != nil { + return fmt.Errorf("could not update finalized boundary: %w", err) + } + + // NOTE: we don't want to prune forks that have become invalid here, so + // that we can keep validating entities and generating slashing + // challenges for some time - the pruning should happen some place else + // after a certain delay of blocks + + return nil +} + +// InsertClusterPayload inserts the payload for a cluster block. It inserts +// both the collection and all constituent transactions, allowing duplicates. +func InsertClusterPayload(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, payload *cluster.Payload) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + var txIDs []flow.Identifier + err := operation.LookupCollectionPayload(rw.GlobalReader(), blockID, &txIDs) + if err == nil { + return fmt.Errorf("collection payload already exists for block %s: %w", blockID, storage.ErrAlreadyExists) + } + if err != storage.ErrNotFound { + return fmt.Errorf("unexpected error while attempting to retrieve collection payload: %w", err) + } + + // STEP 1: persist the collection and constituent transactions. + // A cluster payload essentially represents a single collection (batch of transactions) plus some auxilluary + // information. Storing the collection on its own allows us to also retrieve it independently of the cluster + // block's payload. We expect repeated requests to persist the same collection data here, because it is valid + // to propose the same collection in two competing forks. However, we don't have to worry about repeated calls, + // because collections and transactions are keyed by their respective content hashes. 
So a different value + // should produce a different key, making accidental overwrites with inconsistent values impossible. + // Here, we persist a reduced representation of the collection, only listing the constituent transactions by their hashes. + light := payload.Collection.Light() + writer := rw.Writer() + err = operation.UpsertCollection(writer, &light) // collection is keyed by content hash, hence no overwrite protection is needed + if err != nil { + return fmt.Errorf("could not insert payload collection: %w", err) + } + + // persist constituent transactions: + for _, colTx := range payload.Collection.Transactions { + err = operation.UpsertTransaction(writer, colTx.ID(), colTx) // transactions are keyed by content hash, hence no overwrite protection is needed + if err != nil { + return fmt.Errorf("could not insert payload transaction: %w", err) + } + } + + // STEP 2: for the cluster block ID, index the constituent transactions plus the auxiliary data from the payload. + // Caution: Here we use the cluster block's ID as key, which is *not* uniquely determined by the indexed data. + // Hence, we must ensure that we are not accidentally overwriting existing data (in case of a bug in the calling + // code) with different values. This is ensured by the initial check confirming that the collection payload + // has not yet been indexed (and the assumption that `IndexReferenceBlockByClusterBlock` is called nowhere else). + txIDs = light.Transactions + err = operation.IndexCollectionPayload(lctx, writer, blockID, txIDs) + if err != nil { + return fmt.Errorf("could not index collection: %w", err) + } + + // insert the reference block ID + err = operation.IndexReferenceBlockByClusterBlock(lctx, writer, blockID, payload.ReferenceBlockID) + if err != nil { + return fmt.Errorf("could not insert reference block ID: %w", err) + } + + return nil +} + +// RetrieveClusterPayload retrieves a cluster consensus block payload by block ID. +func RetrieveClusterPayload(r storage.Reader, blockID flow.Identifier, payload *cluster.Payload) error { + // lookup the reference block ID + var refID flow.Identifier + err := operation.LookupReferenceBlockByClusterBlock(r, blockID, &refID) + if err != nil { + return fmt.Errorf("could not retrieve reference block ID: %w", err) + } + + // lookup collection transaction IDs + var txIDs []flow.Identifier + err = operation.LookupCollectionPayload(r, blockID, &txIDs) + if err != nil { + return fmt.Errorf("could not look up collection payload: %w", err) + } + + colTransactions := make([]*flow.TransactionBody, 0, len(txIDs)) + // retrieve individual transactions + for _, txID := range txIDs { + var nextTx flow.TransactionBody + err = operation.RetrieveTransaction(r, txID, &nextTx) + if err != nil { + return fmt.Errorf("could not retrieve transaction: %w", err) + } + colTransactions = append(colTransactions, &nextTx) + } + + *payload = cluster.PayloadFromTransactions(refID, colTransactions...)
+ + return nil +} diff --git a/storage/procedure/cluster_test.go b/storage/procedure/cluster_test.go new file mode 100644 index 00000000000..f35aa64ba8d --- /dev/null +++ b/storage/procedure/cluster_test.go @@ -0,0 +1,154 @@ +package procedure + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInsertRetrieveClusterBlock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + block := unittest.ClusterBlockFixture() + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + require.NoError(t, err) + defer lctx.Release() + + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return InsertClusterBlock(lctx, rw, &block) + })) + + var retrieved cluster.Block + err = RetrieveClusterBlock(db.Reader(), block.ID(), &retrieved) + require.NoError(t, err) + + require.Equal(t, block, retrieved) + }) +} + +func TestFinalizeClusterBlock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + parent := unittest.ClusterBlockFixture() + block := unittest.ClusterBlockWithParent(&parent) + + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return InsertClusterBlock(lctx, rw, &parent) + })) + + // index parent as latest finalized block (manually writing respective indexes like in bootstrapping to skip transitive consistency checks) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), block.Header.ChainID, parent.Header.Height, parent.ID()) + })) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), block.Header.ChainID, parent.Header.Height) + })) + + // Insert new block and verify `FinalizeClusterBlock` procedure accepts it + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return InsertClusterBlock(lctx, rw, &block) + })) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, block.Header.ID()) + })) + + // verify that the new block as been properly indexed as the latest finalized + var latestFinalizedHeight uint64 + var err error + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), block.Header.ChainID, &latestFinalizedHeight) + require.NoError(t, err) + require.Equal(t, block.Header.Height, latestFinalizedHeight) + + var headID flow.Identifier + err = operation.LookupClusterBlockHeight(db.Reader(), block.Header.ChainID, latestFinalizedHeight, &headID) + require.NoError(t, err) + require.Equal(t, block.ID(), headID) + }) +} + +// TestDisconnectedFinalizedBlock verifies that finalization logic rejects finalizing a block whose parent is not the latest finalized block. 
+func TestDisconnectedFinalizedBlock(t *testing.T) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + defer lctx.Release() + + t.Run("finalizing C should fail because B is not yet finalized", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + _, _, blockC, _ := constructState(t, db, lctx) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, blockC.ID()) + }) + require.Error(t, err) + require.NotErrorIs(t, err, storage.ErrAlreadyExists) + }) + }) + + t.Run("finalizing B and then C should succeed", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + _, blockB, blockC, _ := constructState(t, db, lctx) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, blockB.ID()) + })) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, blockC.ID()) + })) + }) + }) + + t.Run("finalizing B and then D should fail, because B is not the parent of D", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + _, blockB, _, blockD := constructState(t, db, lctx) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, blockB.ID()) + })) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return FinalizeClusterBlock(lctx, rw, blockD.ID()) + }) + require.Error(t, err) + require.NotErrorIs(t, err, storage.ErrAlreadyExists) + }) + }) + +} + +// `constructState` initializes a stub of the following collector chain state: +// +// A ← B ← C +// ↖ D +func constructState(t *testing.T, db storage.DB, lctx lockctx.Proof) (blockA, blockB, blockC, blockD cluster.Block) { + blockA = unittest.ClusterBlockFixture() + blockB = unittest.ClusterBlockWithParent(&blockA) + blockC = unittest.ClusterBlockWithParent(&blockB) + blockD = unittest.ClusterBlockWithParent(&blockA) + + // Store all blocks + for _, b := range []cluster.Block{blockA, blockB, blockC, blockD} { + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return InsertClusterBlock(lctx, rw, &b) + })) + } + + // index `blockA` as latest finalized block (manually writing respective indexes like in bootstrapping to skip transitive consistency checks) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), blockA.Header.ChainID, blockA.Header.Height, blockA.ID()) + })) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), blockA.Header.ChainID, blockA.Header.Height) + })) + + return blockA, blockB, blockC, blockD +} diff --git a/storage/procedure/index.go b/storage/procedure/index.go new file mode 100644 index 00000000000..0025a7a5653 --- /dev/null +++ b/storage/procedure/index.go @@ -0,0 +1,82 @@ +package procedure + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +func InsertIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + if 
!lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + // The following database operations are all indexing data by block ID, + // they don't need to check if the data is already stored, because the same check has been done + // when storing the block header, which is in the same batch update and holding the same lock. + // if there is no header stored for the block ID, it means no index data for the same block ID + // was stored either, as long as the same lock is held, the data is guaranteed to be consistent. + w := rw.Writer() + err := operation.IndexPayloadGuarantees(lctx, w, blockID, index.CollectionIDs) + if err != nil { + return fmt.Errorf("could not store guarantee index: %w", err) + } + err = operation.IndexPayloadSeals(lctx, w, blockID, index.SealIDs) + if err != nil { + return fmt.Errorf("could not store seal index: %w", err) + } + err = operation.IndexPayloadReceipts(lctx, w, blockID, index.ReceiptIDs) + if err != nil { + return fmt.Errorf("could not store receipts index: %w", err) + } + err = operation.IndexPayloadResults(lctx, w, blockID, index.ResultIDs) + if err != nil { + return fmt.Errorf("could not store results index: %w", err) + } + err = operation.IndexPayloadProtocolStateID(lctx, w, blockID, index.ProtocolStateID) + if err != nil { + return fmt.Errorf("could not store protocol state id: %w", err) + } + return nil +} + +func RetrieveIndex(r storage.Reader, blockID flow.Identifier, index *flow.Index) error { + var collIDs []flow.Identifier + err := operation.LookupPayloadGuarantees(r, blockID, &collIDs) + if err != nil { + return fmt.Errorf("could not retrieve guarantee index: %w", err) + } + var sealIDs []flow.Identifier + err = operation.LookupPayloadSeals(r, blockID, &sealIDs) + if err != nil { + return fmt.Errorf("could not retrieve seal index: %w", err) + } + var receiptIDs []flow.Identifier + err = operation.LookupPayloadReceipts(r, blockID, &receiptIDs) + if err != nil { + return fmt.Errorf("could not retrieve receipts index: %w", err) + } + var resultsIDs []flow.Identifier + err = operation.LookupPayloadResults(r, blockID, &resultsIDs) + if err != nil { + return fmt.Errorf("could not retrieve results index: %w", err) + } + var stateID flow.Identifier + err = operation.LookupPayloadProtocolStateID(r, blockID, &stateID) + if err != nil { + return fmt.Errorf("could not retrieve protocol state id: %w", err) + } + + *index = flow.Index{ + CollectionIDs: collIDs, + SealIDs: sealIDs, + ReceiptIDs: receiptIDs, + ResultIDs: resultsIDs, + ProtocolStateID: stateID, + } + return nil +} diff --git a/storage/procedure/index_test.go b/storage/procedure/index_test.go new file mode 100644 index 00000000000..7133d7b6ae1 --- /dev/null +++ b/storage/procedure/index_test.go @@ -0,0 +1,36 @@ +package procedure + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInsertRetrieveIndex(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + index := unittest.IndexFixture() + + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return 
InsertIndex(lctx, rw, blockID, index) + }) + require.NoError(t, err) + + var retrieved flow.Index + err = RetrieveIndex(db.Reader(), blockID, &retrieved) + require.NoError(t, err) + + require.Equal(t, index, &retrieved) + }) +} diff --git a/storage/protocol_kv_store.go b/storage/protocol_kv_store.go index 3666206d0cc..3b71cad25a3 100644 --- a/storage/protocol_kv_store.go +++ b/storage/protocol_kv_store.go @@ -1,8 +1,9 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) // ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. At this level, the API @@ -13,16 +14,20 @@ import ( // TODO maybe rename to `ProtocolStateSnapshots` (?) because at this low level, we are not exposing the // KV-store, it is just an encoded data blob type ProtocolKVStore interface { - // StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), - // which persists the given KV-store snapshot as part of a DB tx. - // Expected errors of the returned anonymous function: - // - storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored. - StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error + // BatchStore persists the KV-store snapshot in the database using the given ID as key. + // BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). + // Here, the ID is expected to be a collision-resistant hash of the snapshot (including the + // ProtocolStateVersion). Hence, for the same ID (key), BatchStore will reject changing the data (value). + // Expected errors during normal operations: + // - storage.ErrDataMismatch if a _different_ KV store for the given stateID has already been persisted + BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error - // IndexTx returns an anonymous function intended to be executed as part of a database transaction. - // In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the - // block that _proposes_ the updated key-value store. - // Upon call, the anonymous function persists the specific map entry in the node's database. + // BatchIndex appends the following operation to the provided write batch: + // we extend the map from `blockID` to `stateID`, where `blockID` references the + // block that _proposes_ updated key-value store. + // BatchIndex is idempotent, i.e. it accepts repeated calls with the same pairs of (blockID , stateID). + // Per protocol convention, the block references the `stateID`. As the `blockID` is a collision-resistant hash, + // for the same `blockID`, BatchIndex will reject changing the data. // Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated KV store. For example, // the KV store changes if we seal some execution results emitting specific service events. @@ -31,8 +36,8 @@ type ProtocolKVStore interface { // child block, _after_ validating the QC. // // Expected errors during normal operations: - // - storage.ErrAlreadyExists if a KV store for the given blockID has already been indexed. 
- IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error + // - storage.ErrDataMismatch if a _different_ KV store for the given stateID has already been persisted + BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error // ByID retrieves the KV store snapshot with the given ID. // Expected errors during normal operations: diff --git a/storage/qcs.go b/storage/qcs.go index fab51e125ea..7a276d2f426 100644 --- a/storage/qcs.go +++ b/storage/qcs.go @@ -1,8 +1,9 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) // QuorumCertificates represents storage for Quorum Certificates. @@ -11,9 +12,16 @@ import ( // In the example below, `QC_1` is indexed by `Block_1.ID()` // Block_1 <- Block_2(QC_1) type QuorumCertificates interface { - // StoreTx stores a Quorum Certificate as part of database transaction QC is indexed by QC.BlockID. - // * storage.ErrAlreadyExists if any QC for blockID is already stored - StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error + // BatchStore stores a Quorum Certificate as part of database batch update. QC is indexed by QC.BlockID. + // + // Note: For the same block, different QCs can easily be constructed by selecting different sub-sets of the received votes + // (provided more than the minimal number of consensus participants voted, which is typically the case). In most cases, it + // is only important that a block has been certified, but irrelevant who specifically contributed to the QC. Therefore, we + // only store the first QC. + // + // If *any* quorum certificate for QC.BlockID has already been stored, a `storage.ErrAlreadyExists` is returned (typically benign). + BatchStore(lockctx.Proof, ReaderBatchWriter, *flow.QuorumCertificate) error + // ByBlockID returns QC that certifies block referred by blockID. // * storage.ErrNotFound if no QC for blockID doesn't exist. ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) diff --git a/storage/receipts.go b/storage/receipts.go index dd1ce3561eb..f85040cbcce 100644 --- a/storage/receipts.go +++ b/storage/receipts.go @@ -1,6 +1,8 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) @@ -21,6 +23,8 @@ type ExecutionReceipts interface { // ByBlockID retrieves all known execution receipts for the given block // (from any Execution Node). + // + // No errors are expected errors during normal operations. ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) } @@ -32,13 +36,13 @@ type MyExecutionReceipts interface { // No errors are expected during normal operation // If entity fails marshalling, the error is wrapped in a generic error and returned. // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error + BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error // MyReceipt retrieves my receipt for the given block. MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) // BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch // No errors are expected during normal operation, even if no entries are matched. 
- // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } diff --git a/storage/results.go b/storage/results.go index 733c8bc568b..a943866370e 100644 --- a/storage/results.go +++ b/storage/results.go @@ -2,18 +2,12 @@ package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) type ExecutionResultsReader interface { // ByID retrieves an execution result by its ID. Returns `ErrNotFound` if `resultID` is unknown. ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) - // ByIDTx returns a functor which retrieves the execution result by its ID, as part of a future database transaction. - // When executing the functor, it returns `ErrNotFound` if no execution result with the respective ID is known. - // deprecated - ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) - // ByBlockID retrieves an execution result by block ID. ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) } diff --git a/storage/store/approvals.go b/storage/store/approvals.go index c68a83219c4..e53ccd7e8b8 100644 --- a/storage/store/approvals.go +++ b/storage/store/approvals.go @@ -1,9 +1,9 @@ package store import ( - "errors" "fmt" - "sync" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" @@ -21,18 +21,14 @@ import ( // *only safe* for Verification Nodes when tracking their own approvals (for the same ExecutionResult, // a Verifier will always produce the same approval) type ResultApprovals struct { - db storage.DB - cache *Cache[flow.Identifier, *flow.ResultApproval] - indexing *sync.Mutex // preventing concurrent indexing of approvals + db storage.DB + cache *Cache[flow.Identifier, *flow.ResultApproval] + lockManager lockctx.Manager } var _ storage.ResultApprovals = (*ResultApprovals)(nil) -func NewResultApprovals(collector module.CacheMetrics, db storage.DB) *ResultApprovals { - store := func(rw storage.ReaderBatchWriter, key flow.Identifier, val *flow.ResultApproval) error { - return operation.InsertResultApproval(rw.Writer(), val) - } - +func NewResultApprovals(collector module.CacheMetrics, db storage.DB, lockManager lockctx.Manager) *ResultApprovals { retrieve := func(r storage.Reader, approvalID flow.Identifier) (*flow.ResultApproval, error) { var approval flow.ResultApproval err := operation.RetrieveResultApproval(r, approvalID, &approval) @@ -40,71 +36,53 @@ func NewResultApprovals(collector module.CacheMetrics, db storage.DB) *ResultApp } return &ResultApprovals{ - db: db, + lockManager: lockManager, + db: db, cache: newCache(collector, metrics.ResourceResultApprovals, withLimit[flow.Identifier, *flow.ResultApproval](flow.DefaultTransactionExpiry+100), - withStore(store), withRetrieve(retrieve)), - indexing: new(sync.Mutex), } } -// Store stores a ResultApproval -func (r *ResultApprovals) Store(approval *flow.ResultApproval) error { - return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return r.cache.PutTx(rw, approval.ID(), approval) - }) -} - -// Index indexes a ResultApproval by chunk (ResultID + chunk index). 
-// This operation is idempotent (repeated calls with the same value are equivalent to -// just calling the method once; still the method succeeds on each call). +// StoreMyApproval returns a functor, whose execution +// - will store the given ResultApproval +// - and index it by result ID and chunk index. +// - requires storage.LockIndexResultApproval lock to be held by the caller +// +// The functor's expected error returns during normal operation are: +// - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed // // CAUTION: the Flow protocol requires multiple approvals for the same chunk from different verification // nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index // Executed Chunk ➜ ResultApproval ID (populated here) is *only safe* to be used by Verification Nodes // for tracking their own approvals. -func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - // For the same ExecutionResult, a correct Verifier will always produce the same approval. In other words, - // if we have already indexed an approval for the pair (resultID, chunkIndex) we should never overwrite it - // with a _different_ approval. We explicitly enforce that here to prevent state corruption. - // The lock guarantees that no other thread can concurrently update the index. Thereby confirming that no value - // is already stored for the given key (resultID, chunkIndex) and then updating the index (or aborting) is - // synchronized into one atomic operation. - r.indexing.Lock() - defer r.indexing.Unlock() - - err := r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - var storedApprovalID flow.Identifier - err := operation.LookupResultApproval(rw.GlobalReader(), resultID, chunkIndex, &storedApprovalID) - if err != nil { - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not lookup result approval ID: %w", err) - } - - // no approval found, index the approval - - return operation.UnsafeIndexResultApproval(rw.Writer(), resultID, chunkIndex, approvalID) - } - - // an approval is already indexed, double check if it is the same - // We don't allow indexing multiple approvals per chunk because the - // store is only used within Verification nodes, and it is impossible - // for a Verification node to compute different approvals for the same - // chunk. - - if storedApprovalID != approvalID { - return fmt.Errorf("attempting to store conflicting approval (result: %v, chunk index: %d): storing: %v, stored: %v. %w", - resultID, chunkIndex, approvalID, storedApprovalID, storage.ErrDataMismatch) +// +// For the same ExecutionResult, a Verifier will always produce the same approval. Therefore, this operation +// is idempotent, i.e. repeated calls with the *same inputs* are equivalent to just calling the method once; +// still the method succeeds on each call. However, when attempting to index *different* ResultApproval IDs +// for the same key (resultID, chunkIndex) this method returns an exception, as this should never happen for +// a correct Verification Node indexing its own approvals. +// It returns a functor so that some computation (such as computing approval ID) can be done +// before acquiring the lock. 
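For orientation, here is a minimal usage sketch of the functor pattern described above, mirroring the tests further below; the names `approvals`, `lockManager`, and `approval` are illustrative placeholders, not part of this change:

```go
// Sketch only: build the deferred write outside the lock, then execute it while
// holding storage.LockIndexResultApproval.
storing := approvals.StoreMyApproval(approval) // approval ID and encoding are computed here, lock-free

lctx := lockManager.NewContext()
if err := lctx.AcquireLock(storage.LockIndexResultApproval); err != nil {
	return fmt.Errorf("could not acquire approval indexing lock: %w", err)
}
defer lctx.Release()

if err := storing(lctx); err != nil {
	// storage.ErrDataMismatch indicates a *different* approval was already indexed for
	// the same (result ID, chunk index) pair, which should never happen for a correct
	// Verification Node indexing its own approvals
	return fmt.Errorf("could not store and index result approval: %w", err)
}
```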
+func (r *ResultApprovals) StoreMyApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof) error { + // pre-compute the approval ID and encoded data to be stored + // db operation is deferred until the returned function is called + storing := operation.InsertAndIndexResultApproval(approval) + + return func(lctx lockctx.Proof) error { + if !lctx.HoldsLock(storage.LockIndexResultApproval) { + return fmt.Errorf("missing lock for index result approval") } - return nil - }) - - if err != nil { - return fmt.Errorf("could not index result approval: %w", err) + return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + storage.OnCommitSucceed(rw, func() { + // the success callback is called after the lock is released, so + // the id computation here would not increase the lock contention + r.cache.Insert(approval.ID(), approval) + }) + return storing(lctx, rw) + }) } - return nil } // ByID retrieves a ResultApproval by its ID diff --git a/storage/store/approvals_test.go b/storage/store/approvals_test.go index b6211b363b2..af74072d206 100644 --- a/storage/store/approvals_test.go +++ b/storage/store/approvals_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation/dbtest" @@ -17,19 +18,26 @@ import ( func TestApprovalStoreAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := store.NewResultApprovals(metrics, db) + lockManager := storage.NewTestingLockManager() + store := store.NewResultApprovals(metrics, db, lockManager) + // create the deferred database operation to store `approval`; we deliberately + // do this outside of the lock to confirm that the lock is not required for + // creating the operation -- only for executing the storage write further below approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) + storing := store.StoreMyApproval(approval) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockIndexResultApproval)) + err := storing(lctx) require.NoError(t, err) + defer lctx.Release() // While still holding the lock, verify that reads are not blocked by acquired locks + // retrieve entire approval by its ID byID, err := store.ByID(approval.ID()) require.NoError(t, err) require.Equal(t, approval, byID) + // retrieve approval by pair (executed result ID, chunk index) byChunk, err := store.ByChunk(approval.Body.ExecutionResultID, approval.Body.ChunkIndex) require.NoError(t, err) require.Equal(t, approval, byChunk) @@ -39,43 +47,50 @@ func TestApprovalStoreAndRetrieve(t *testing.T) { func TestApprovalStoreTwice(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := store.NewResultApprovals(metrics, db) + lockManager := storage.NewTestingLockManager() + store := store.NewResultApprovals(metrics, db, lockManager) + // create the deferred database operation to store `approval`; we deliberately + // do this outside of the lock to confirm that the lock is not required for + // creating the operation -- only for executing the storage write further below approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = 
store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) + storing := store.StoreMyApproval(approval) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockIndexResultApproval)) + err := storing(lctx) require.NoError(t, err) + lctx.Release() - err = store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockIndexResultApproval)) + err = storing(lctx2) // repeated storage of same approval should be no-op require.NoError(t, err) + lctx2.Release() }) } func TestApprovalStoreTwoDifferentApprovalsShouldFail(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := store.NewResultApprovals(metrics, db) + lockManager := storage.NewTestingLockManager() + store := store.NewResultApprovals(metrics, db, lockManager) - approval1 := unittest.ResultApprovalFixture() - approval2 := unittest.ResultApprovalFixture() - - err := store.Store(approval1) - require.NoError(t, err) + approval1, approval2 := twoApprovalsForTheSameResult(t) - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval1.ID()) + storing := store.StoreMyApproval(approval1) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockIndexResultApproval)) + err := storing(lctx) + lctx.Release() require.NoError(t, err) // we can store a different approval, but we can't index a different // approval for the same chunk. - err = store.Store(approval2) - require.NoError(t, err) - - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval2.ID()) + storing2 := store.StoreMyApproval(approval2) + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockIndexResultApproval)) + err = storing2(lctx2) + lctx2.Release() require.Error(t, err) require.ErrorIs(t, err, storage.ErrDataMismatch) }) @@ -86,38 +101,44 @@ func TestApprovalStoreTwoDifferentApprovalsShouldFail(t *testing.T) { func TestApprovalStoreTwoDifferentApprovalsConcurrently(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := store.NewResultApprovals(metrics, db) + lockManager := storage.NewTestingLockManager() + store := store.NewResultApprovals(metrics, db, lockManager) - approval1 := unittest.ResultApprovalFixture() - approval2 := unittest.ResultApprovalFixture() + approval1, approval2 := twoApprovalsForTheSameResult(t) - var wg sync.WaitGroup - wg.Add(2) + var startSignal sync.WaitGroup // goroutines attempting store operations will wait for this signal to start concurrently + startSignal.Add(1) // expecting one signal from the main thread to start both goroutines + var doneSignal sync.WaitGroup // the main thread will wait on this for goroutines attempting store operations to finish + doneSignal.Add(2) // expecting two goroutines to signal finish var firstIndexErr, secondIndexErr error // First goroutine stores and indexes the first approval.
go func() { - defer wg.Done() - - err := store.Store(approval1) - require.NoError(t, err) - - firstIndexErr = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval1.ID()) + storing := store.StoreMyApproval(approval1) + lctx := lockManager.NewContext() + + startSignal.Wait() + require.NoError(t, lctx.AcquireLock(storage.LockIndexResultApproval)) + firstIndexErr = storing(lctx) + lctx.Release() + doneSignal.Done() }() // Second goroutine stores and tries to index the second approval for the same chunk. go func() { - defer wg.Done() - - err := store.Store(approval2) - require.NoError(t, err) - - secondIndexErr = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval2.ID()) + storing := store.StoreMyApproval(approval2) + lctx := lockManager.NewContext() + + startSignal.Wait() + require.NoError(t, lctx.AcquireLock(storage.LockIndexResultApproval)) + secondIndexErr = storing(lctx) + lctx.Release() + doneSignal.Done() }() - // Wait for both goroutines to finish - wg.Wait() + startSignal.Done() // start both goroutines + doneSignal.Wait() // wait for both goroutines to finish // Check that one of the Index operations succeeded and the other failed if firstIndexErr == nil { @@ -129,3 +150,14 @@ func TestApprovalStoreTwoDifferentApprovalsConcurrently(t *testing.T) { } }) } + +func twoApprovalsForTheSameResult(t *testing.T) (*flow.ResultApproval, *flow.ResultApproval) { + approval1 := unittest.ResultApprovalFixture() + approval2 := unittest.ResultApprovalFixture() + // we start with two entirely different approvals; now modify the second to reference the same result and chunk as the first + approval2.Body.ChunkIndex = approval1.Body.ChunkIndex + approval2.Body.ExecutionResultID = approval1.Body.ExecutionResultID + // sanity check: make sure the two approvals are different + require.NotEqual(t, approval1.ID(), approval2.ID(), "expected two different approvals, but got the same ID") + return approval1, approval2 +} diff --git a/storage/store/blocks.go b/storage/store/blocks.go new file mode 100644 index 00000000000..99afea32095 --- /dev/null +++ b/storage/store/blocks.go @@ -0,0 +1,134 @@ +package store + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// Blocks implements a simple block storage around a database. +type Blocks struct { + db storage.DB + headers *Headers + payloads *Payloads +} + +var _ storage.Blocks = (*Blocks)(nil) + +// NewBlocks creates a new Blocks storage, backed by the given database, headers and payloads stores. +func NewBlocks(db storage.DB, headers *Headers, payloads *Payloads) *Blocks { + b := &Blocks{ + db: db, + headers: headers, + payloads: payloads, + } + return b +} + +// BatchStore stores a valid block in a batch. +// +// Expected errors during normal operations: +// - [storage.ErrAlreadyExists] if some block with the same ID has already been stored +func (b *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, block *flow.Block) error { + err := b.headers.storeTx(lctx, rw, block.Header) + if err != nil { + return fmt.Errorf("could not store header %v: %w", block.Header.ID(), err) + } + err = b.payloads.storeTx(lctx, rw, block.ID(), block.Payload) + if err != nil { + return fmt.Errorf("could not store payload: %w", err) + } + return nil +} + +// retrieve returns the block with the given hash. It is available for +// finalized and pending blocks.
+// Expected errors during normal operations: +// - storage.ErrNotFound if no block is found +func (b *Blocks) retrieve(blockID flow.Identifier) (*flow.Block, error) { + header, err := b.headers.retrieveTx(blockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve header: %w", err) + } + payload, err := b.payloads.retrieveTx(blockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve payload: %w", err) + } + block := &flow.Block{ + Header: header, + Payload: payload, + } + return block, nil +} + +// ByID returns the block with the given hash. It is available for +// finalized and pending blocks. +// Expected errors during normal operations: +// - storage.ErrNotFound if no block is found +func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { + return b.retrieve(blockID) +} + +// ByView returns the block with the given view. It is only available for certified blocks. +// Certified blocks are the blocks that have received a QC. HotStuff guarantees that for each view, +// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique +// even for non-finalized blocks. +// Expected errors during normal operations: +// - `storage.ErrNotFound` if no certified block is known at the given view. +func (b *Blocks) ByView(view uint64) (*flow.Block, error) { + blockID, err := b.headers.BlockIDByView(view) + if err != nil { + return nil, err + } + return b.ByID(blockID) +} + +// ByHeight returns the block at the given height. It is only available +// for finalized blocks. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if no block is found for the given height +func (b *Blocks) ByHeight(height uint64) (*flow.Block, error) { + blockID, err := b.headers.retrieveIdByHeightTx(height) + if err != nil { + return nil, err + } + return b.retrieve(blockID) +} + +// ByCollectionID returns the *finalized* block that contains the collection with the given ID. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if no finalized block is known that contains the collection +func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { + var blockID flow.Identifier + err := operation.LookupBlockContainingCollection(b.db.Reader(), collID, &blockID) + if err != nil { + return nil, fmt.Errorf("could not look up block: %w", err) + } + return b.ByID(blockID) +} + +// IndexBlockForCollections indexes the block each collection was included in. +// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). +// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. +// +// No errors expected during normal operation.
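As a hedged illustration of how the collection index is meant to be used (the `blocks` and `finalized` variables are placeholders, not part of this change): collections are indexed once their containing block is finalized, and can then be resolved back to that block:

```go
// Sketch only: index the guaranteed collections of a newly finalized block, then
// resolve one of them back to its containing block.
collIDs := make([]flow.Identifier, 0, len(finalized.Payload.Guarantees))
for _, guarantee := range finalized.Payload.Guarantees {
	collIDs = append(collIDs, guarantee.CollectionID)
}
if err := blocks.IndexBlockForCollections(finalized.ID(), collIDs); err != nil {
	return fmt.Errorf("could not index collections for block %v: %w", finalized.ID(), err)
}

if len(collIDs) > 0 {
	containing, err := blocks.ByCollectionID(collIDs[0])
	if err != nil {
		// storage.ErrNotFound if no finalized block is known for the collection
		return fmt.Errorf("could not look up block by collection: %w", err)
	}
	_ = containing // expected to equal `finalized`
}
```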
+func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error { + return b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, collID := range collIDs { + err := operation.IndexCollectionBlock(rw.Writer(), collID, blockID) + if err != nil { + return fmt.Errorf("could not index collection block (%x): %w", collID, err) + } + } + return nil + }) +} diff --git a/storage/store/blocks_test.go b/storage/store/blocks_test.go new file mode 100644 index 00000000000..b698b7015e4 --- /dev/null +++ b/storage/store/blocks_test.go @@ -0,0 +1,54 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestBlockStoreAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + cacheMetrics := &metrics.NoopCollector{} + // verify after storing a block should be able to retrieve it back + blocks := store.InitAll(cacheMetrics, db).Blocks + block := unittest.FullBlockFixture() + block.SetPayload(unittest.PayloadFixture(unittest.WithAllTheFixins)) + + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, &block) + }) + require.NoError(t, err) + lctx.Release() + + retrieved, err := blocks.ByID(block.ID()) + require.NoError(t, err) + require.Equal(t, &block, retrieved) + + // repeated storage of the same block should return + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx2, rw, &block) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + lctx2.Release() + + // verify after a restart, the block stored in the database is the same + // as the original + blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + receivedAfterRestart, err := blocksAfterRestart.ByID(block.ID()) + require.NoError(t, err) + require.Equal(t, &block, receivedAfterRestart) + }) +} diff --git a/storage/store/cache.go b/storage/store/cache.go index d8f877d4c38..8c8178c9600 100644 --- a/storage/store/cache.go +++ b/storage/store/cache.go @@ -5,12 +5,12 @@ import ( "fmt" lru "github.com/hashicorp/golang-lru/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" ) -// nolint:unused func withLimit[K comparable, V any](limit uint) func(*Cache[K, V]) { return func(c *Cache[K, V]) { c.limit = limit @@ -19,21 +19,30 @@ func withLimit[K comparable, V any](limit uint) func(*Cache[K, V]) { type storeFunc[K comparable, V any] func(rw storage.ReaderBatchWriter, key K, val V) error +type storeWithLockFunc[K comparable, V any] func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, key K, val V) error + const DefaultCacheSize = uint(1000) -// nolint:unused func withStore[K comparable, V any](store storeFunc[K, V]) func(*Cache[K, V]) { return func(c *Cache[K, V]) { c.store = store } } -// nolint:unused +func withStoreWithLock[K comparable, V any](store storeWithLockFunc[K, V]) func(*Cache[K, V]) { + return func(c *Cache[K, V]) { + c.storeWithLock = store + } +} + 
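A hedged sketch of how a store is expected to wire the new lock-aware cache path; the value type `Foo`, the low-level `insertFoo`/`retrieveFoo` operations, the `collector`, and the particular lock are placeholders, not part of this change:

```go
// Sketch only: configure a cache whose write path requires a held lock, then route
// writes through PutWithLockTx inside the store's batch method.
storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, id flow.Identifier, val *Foo) error {
	if !lctx.HoldsLock(storage.LockInsertBlock) { // placeholder: whichever lock guards this index
		return fmt.Errorf("missing required lock for storing %v", id)
	}
	return insertFoo(lctx, rw, id, val) // hypothetical low-level write
}

cache := newCache(collector, "example_resource",
	withLimit[flow.Identifier, *Foo](DefaultCacheSize),
	withStoreWithLock(storeWithLock),
	withRetrieve(retrieveFoo), // hypothetical read path
)

// inside the store's BatchStore(lctx, rw, id, val):
// return cache.PutWithLockTx(lctx, rw, id, val)
```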
func noStore[K comparable, V any](_ storage.ReaderBatchWriter, _ K, _ V) error { return fmt.Errorf("no store function for cache put available") } -// nolint: unused +func noStoreWithLock[K comparable, V any](_ lockctx.Proof, _ storage.ReaderBatchWriter, _ K, _ V) error { + return fmt.Errorf("no store function for cache put with lock available") +} + func noopStore[K comparable, V any](_ storage.ReaderBatchWriter, _ K, _ V) error { return nil } @@ -52,39 +61,37 @@ func noRemove[K comparable](_ storage.ReaderBatchWriter, _ K) error { type retrieveFunc[K comparable, V any] func(r storage.Reader, key K) (V, error) -// nolint:unused func withRetrieve[K comparable, V any](retrieve retrieveFunc[K, V]) func(*Cache[K, V]) { return func(c *Cache[K, V]) { c.retrieve = retrieve } } -// nolint:unused func noRetrieve[K comparable, V any](_ storage.Reader, _ K) (V, error) { var nullV V return nullV, fmt.Errorf("no retrieve function for cache get available") } type Cache[K comparable, V any] struct { - metrics module.CacheMetrics - // nolint:unused - limit uint - store storeFunc[K, V] - retrieve retrieveFunc[K, V] - remove removeFunc[K] - resource string - cache *lru.Cache[K, V] + metrics module.CacheMetrics + limit uint + store storeFunc[K, V] + storeWithLock storeWithLockFunc[K, V] + retrieve retrieveFunc[K, V] + remove removeFunc[K] + resource string + cache *lru.Cache[K, V] } -// nolint:unused func newCache[K comparable, V any](collector module.CacheMetrics, resourceName string, options ...func(*Cache[K, V])) *Cache[K, V] { c := Cache[K, V]{ - metrics: collector, - limit: 1000, - store: noStore[K, V], - retrieve: noRetrieve[K, V], - remove: noRemove[K], - resource: resourceName, + metrics: collector, + limit: 1000, + store: noStore[K, V], + storeWithLock: noStoreWithLock[K, V], + retrieve: noRetrieve[K, V], + remove: noRemove[K], + resource: resourceName, } for _, option := range options { option(&c) @@ -159,6 +166,19 @@ func (c *Cache[K, V]) PutTx(rw storage.ReaderBatchWriter, key K, resource V) err return nil } +func (c *Cache[K, V]) PutWithLockTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, key K, resource V) error { + storage.OnCommitSucceed(rw, func() { + c.Insert(key, resource) + }) + + err := c.storeWithLock(lctx, rw, key, resource) + if err != nil { + return fmt.Errorf("could not store resource: %w", err) + } + + return nil +} + func (c *Cache[K, V]) RemoveTx(rw storage.ReaderBatchWriter, key K) error { storage.OnCommitSucceed(rw, func() { c.Remove(key) @@ -171,7 +191,3 @@ func (c *Cache[K, V]) RemoveTx(rw storage.ReaderBatchWriter, key K) error { return nil } - -func (c *Cache[K, V]) RemoveFunc(del func(key K) bool) { - _ = c.cache.RemoveFunc(del) -} diff --git a/storage/store/cache_test.go b/storage/store/cache_test.go index 8f33fcf595c..072ada39eb8 100644 --- a/storage/store/cache_test.go +++ b/storage/store/cache_test.go @@ -173,77 +173,6 @@ func TestCache_ExceptionNotCached(t *testing.T) { }) } -func BenchmarkCacheRemoveFunc(b *testing.B) { - const txCountPerBlock = 5 - - benchmarks := []struct { - name string - cacheSize int - removeCount int - }{ - {name: "cache size 1,000, remove count 25", cacheSize: 1_000, removeCount: 25}, - {name: "cache size 2,000, remove count 25", cacheSize: 2_000, removeCount: 25}, - {name: "cache size 3,000, remove count 25", cacheSize: 3_000, removeCount: 25}, - {name: "cache size 4,000, remove count 25", cacheSize: 4_000, removeCount: 25}, - {name: "cache size 5,000, remove count 25", cacheSize: 5_000, removeCount: 25}, - {name: "cache size 6,000, 
remove count 25", cacheSize: 6_000, removeCount: 25}, - {name: "cache size 7,000, remove count 25", cacheSize: 7_000, removeCount: 25}, - {name: "cache size 8,000, remove count 25", cacheSize: 8_000, removeCount: 25}, - {name: "cache size 9,000, remove count 25", cacheSize: 9_000, removeCount: 25}, - {name: "cache size 10,000, remove count 25", cacheSize: 10_000, removeCount: 25}, - {name: "cache size 20,000, remove count 25", cacheSize: 20_000, removeCount: 25}, - } - - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - blockCount := bm.cacheSize/txCountPerBlock + 1 - - blockIDs := make([]flow.Identifier, blockCount) - for i := range len(blockIDs) { - blockIDs[i] = unittest.IdentifierFixture() - } - - txIDs := make([]flow.Identifier, blockCount*txCountPerBlock) - for i := range len(txIDs) { - txIDs[i] = unittest.IdentifierFixture() - } - - prefixCount := bm.removeCount / txCountPerBlock - removePrefixes := make(map[string]bool) - for blockIDIndex := len(blockIDs) - 1; len(removePrefixes) < prefixCount; blockIDIndex-- { - blockID := blockIDs[blockIDIndex] - removePrefixes[fmt.Sprintf("%x", blockID)] = true - } - - for range b.N { - b.StopTimer() - - cache := newCache( - metrics.NewNoopCollector(), - metrics.ResourceTransactionResults, - withLimit[string, struct{}](uint(bm.cacheSize)), - withStore(noopStore[string, struct{}]), - withRetrieve(noRetrieve[string, struct{}]), - ) - - for i, blockID := range blockIDs { - for _, txID := range txIDs[i*txCountPerBlock : (i+1)*txCountPerBlock] { - key := fmt.Sprintf("%x%x", blockID, txID) - cache.Insert(key, struct{}{}) - } - } - - b.StartTimer() - - cache.RemoveFunc(func(key string) bool { - keyPrefix := key[:64] - return removePrefixes[keyPrefix] - }) - } - }) - } -} - func BenchmarkCacheRemove(b *testing.B) { const txCountPerBlock = 5 diff --git a/storage/store/chained/commits.go b/storage/store/chained/commits.go deleted file mode 100644 index ff6ea4faa2a..00000000000 --- a/storage/store/chained/commits.go +++ /dev/null @@ -1,39 +0,0 @@ -package chained - -import ( - "errors" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -type ChainedCommits struct { - first storage.CommitsReader - second storage.CommitsReader -} - -var _ storage.CommitsReader = (*ChainedCommits)(nil) - -// NewCommits returns a new ChainedCommits commits store, which will handle reads, which only implements -// read operations -// for reads, it first query the first database, then the second database, this is useful when migrating -// data from badger to pebble -func NewCommits(first storage.CommitsReader, second storage.CommitsReader) *ChainedCommits { - return &ChainedCommits{ - first: first, - second: second, - } -} - -func (c *ChainedCommits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { - result, err := c.first.ByBlockID(blockID) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByBlockID(blockID) - } - - return flow.StateCommitment{}, err -} diff --git a/storage/store/chained/commits_test.go b/storage/store/chained/commits_test.go deleted file mode 100644 index 7681884d2a5..00000000000 --- a/storage/store/chained/commits_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package chained - -import ( - "testing" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - 
"github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestCommitsOnlyFirstHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bcommits := store.NewCommits(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - pcommits := store.NewCommits(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - blockID := unittest.IdentifierFixture() - commit := unittest.StateCommitmentFixture() - - chained := NewCommits(pcommits, bcommits) - - // not found - _, err := chained.ByBlockID(blockID) - require.Error(t, err) - require.ErrorIs(t, err, storage.ErrNotFound) - - // only stored in first - require.NoError(t, pcommits.Store(blockID, commit)) - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, commit, actual) - }) -} - -func TestCommitsOnlySecondHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bcommits := store.NewCommits(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - pcommits := store.NewCommits(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - blockID := unittest.IdentifierFixture() - commit := unittest.StateCommitmentFixture() - - chained := NewCommits(pcommits, bcommits) - // only stored in second - require.NoError(t, bcommits.Store(blockID, commit)) - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, commit, actual) - }) -} diff --git a/storage/store/chained/events.go b/storage/store/chained/events.go deleted file mode 100644 index 76781cc65cf..00000000000 --- a/storage/store/chained/events.go +++ /dev/null @@ -1,76 +0,0 @@ -package chained - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -type ChainedEvents struct { - first storage.EventsReader - second storage.EventsReader -} - -var _ storage.EventsReader = (*ChainedEvents)(nil) - -// NewEvents returns a new ChainedEvents events store, which will handle reads, which only implements -// read operations -// for reads, it first query the first database, then the second database, this is useful when migrating -// data from badger to pebble -func NewEvents(first storage.EventsReader, second storage.EventsReader) *ChainedEvents { - return &ChainedEvents{ - first: first, - second: second, - } -} - -func (c *ChainedEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { - events, err := c.first.ByBlockID(blockID) - if err != nil { - return nil, err - } - - if len(events) > 0 { - return events, nil - } - - return c.second.ByBlockID(blockID) -} - -func (c *ChainedEvents) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error) { - events, err := c.first.ByBlockIDTransactionID(blockID, transactionID) - if err != nil { - return nil, err - } - - if len(events) > 0 { - return events, nil - } - - return c.second.ByBlockIDTransactionID(blockID, transactionID) -} - -func (c *ChainedEvents) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) { - events, err := c.first.ByBlockIDTransactionIndex(blockID, txIndex) - if err != nil { - return nil, err - } - - if len(events) > 0 { - return events, nil - } - - return c.second.ByBlockIDTransactionIndex(blockID, txIndex) -} - -func (c *ChainedEvents) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) { - 
events, err := c.first.ByBlockIDEventType(blockID, eventType) - if err != nil { - return nil, err - } - - if len(events) > 0 { - return events, nil - } - - return c.second.ByBlockIDEventType(blockID, eventType) -} diff --git a/storage/store/chained/events_test.go b/storage/store/chained/events_test.go deleted file mode 100644 index 5221c3c8c56..00000000000 --- a/storage/store/chained/events_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package chained - -import ( - "sort" - "testing" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestEventsOnlyFirstHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bevents := store.NewEvents(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - pevents := store.NewEvents(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - blockID := unittest.IdentifierFixture() - events := unittest.EventsFixture(3) - - chained := NewEvents(pevents, bevents) - - // not found - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - require.Len(t, actual, 0) - - // only stored in first - require.NoError(t, pevents.Store(blockID, []flow.EventsList{events})) - actual, err = chained.ByBlockID(blockID) - require.NoError(t, err) - - eventsEqual(t, events, actual) - }) -} - -func TestEventsOnlySecondHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bevents := store.NewEvents(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - pevents := store.NewEvents(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - blockID := unittest.IdentifierFixture() - events := unittest.EventsFixture(3) - - chained := NewEvents(pevents, bevents) - // only stored in second - require.NoError(t, bevents.Store(blockID, []flow.EventsList{events})) - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - - eventsEqual(t, events, actual) - }) -} - -func eventsEqual(t *testing.T, expected, actual []flow.Event) { - require.Len(t, actual, len(expected)) // Ensure they have the same length - - sortEvents(expected) - sortEvents(actual) - - require.Equal(t, expected, actual) -} - -// Define a sorting method based on event properties -func sortEvents(events []flow.Event) { - sort.Slice(events, func(i, j int) bool { - return events[i].EventIndex < events[j].EventIndex - }) -} diff --git a/storage/store/chained/results.go b/storage/store/chained/results.go deleted file mode 100644 index 8177ca895d5..00000000000 --- a/storage/store/chained/results.go +++ /dev/null @@ -1,68 +0,0 @@ -package chained - -import ( - "errors" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type ChainedExecutionResults struct { - first storage.ExecutionResultsReader - second storage.ExecutionResultsReader -} - -var _ storage.ExecutionResultsReader = (*ChainedExecutionResults)(nil) - -// NewResults returns a new ChainedExecutionResults results store, which will handle reads. 
which only implements -// read operations -// for reads, it first query the first database, then the second database, this is useful when migrating -// data from badger to pebble -func NewExecutionResults(first storage.ExecutionResultsReader, second storage.ExecutionResultsReader) *ChainedExecutionResults { - return &ChainedExecutionResults{ - first: first, - second: second, - } -} - -func (c *ChainedExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { - result, err := c.first.ByID(resultID) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByID(resultID) - } - - return nil, err -} - -func (c *ChainedExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { - result, err := c.first.ByBlockID(blockID) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByBlockID(blockID) - } - - return nil, err -} - -func (c *ChainedExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - return func(tx *transaction.Tx) (*flow.ExecutionResult, error) { - result, err := c.first.ByIDTx(resultID)(tx) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByIDTx(resultID)(tx) - } - - return nil, err - } -} diff --git a/storage/store/chained/results_test.go b/storage/store/chained/results_test.go deleted file mode 100644 index b503ce1376a..00000000000 --- a/storage/store/chained/results_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package chained - -import ( - "testing" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestExecutionResultsOnlyFirstHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bresults := store.NewExecutionResults(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - presults := store.NewExecutionResults(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - result := unittest.ExecutionResultFixture() - - chained := NewExecutionResults(presults, bresults) - - // not found - _, err := chained.ByID(result.ID()) - require.Error(t, err) - require.ErrorIs(t, err, storage.ErrNotFound) - - // only stored in first - require.NoError(t, presults.Store(result)) - actual, err := chained.ByID(result.ID()) - require.NoError(t, err) - - require.Equal(t, result, actual) - }) -} - -func TestExecutionResultsOnlySecondHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bresults := store.NewExecutionResults(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb)) - presults := store.NewExecutionResults(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb)) - - result := unittest.ExecutionResultFixture() - - chained := NewExecutionResults(presults, bresults) - // only stored in second - require.NoError(t, bresults.Store(result)) - actual, err := chained.ByID(result.ID()) - require.NoError(t, err) - - require.Equal(t, result, actual) - }) -} diff --git a/storage/store/chained/transaction_results.go b/storage/store/chained/transaction_results.go deleted file mode 100644 index 5a8718e1a8b..00000000000 --- 
a/storage/store/chained/transaction_results.go +++ /dev/null @@ -1,74 +0,0 @@ -package chained - -import ( - "errors" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -type ChainedTransactionResults struct { - first storage.TransactionResultsReader - second storage.TransactionResultsReader -} - -var _ storage.TransactionResultsReader = (*ChainedTransactionResults)(nil) - -// NewTransactionResults returns a new ChainedTransactionResults transaction results store, which only implements -// read operations -// it first queries the first database, then the second database. this is useful when migrating -// data from badger to pebble -func NewTransactionResults(first storage.TransactionResultsReader, second storage.TransactionResultsReader) *ChainedTransactionResults { - return &ChainedTransactionResults{ - first: first, - second: second, - } -} - -// ByBlockIDTransactionID returns the runtime transaction result for the given block ID and transaction ID -// It returns storage.ErrNotFound if the result is not found -// any other errors are exceptions -func (c *ChainedTransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error) { - result, err := c.first.ByBlockIDTransactionID(blockID, transactionID) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByBlockIDTransactionID(blockID, transactionID) - } - - return nil, err -} - -// ByBlockIDTransactionIndex returns the runtime transaction result for the given block ID and transaction index -// It returns storage.ErrNotFound if the result is not found -// any other errors are exceptions -func (c *ChainedTransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) { - result, err := c.first.ByBlockIDTransactionIndex(blockID, txIndex) - if err == nil { - return result, nil - } - - if errors.Is(err, storage.ErrNotFound) { - return c.second.ByBlockIDTransactionIndex(blockID, txIndex) - } - - return nil, err -} - -// ByBlockID gets all transaction results for a block, ordered by transaction index -// It returns storage.ErrNotFound if the result is not found -// any other errors are exceptions -func (c *ChainedTransactionResults) ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error) { - results, err := c.first.ByBlockID(id) - if err != nil { - return nil, err - } - - if len(results) > 0 { - return results, nil - } - - return c.second.ByBlockID(id) -} diff --git a/storage/store/chained/transaction_results_test.go b/storage/store/chained/transaction_results_test.go deleted file mode 100644 index bc77db8c16e..00000000000 --- a/storage/store/chained/transaction_results_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package chained - -import ( - "fmt" - "testing" - - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/operation/badgerimpl" - "github.com/onflow/flow-go/storage/operation/pebbleimpl" - "github.com/onflow/flow-go/storage/store" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestTxResultsOnlyFirstHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bresults := store.NewTransactionResults(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb), 1) - presults := 
store.NewTransactionResults(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb), 1) - - blockID := unittest.IdentifierFixture() - txResults := make([]flow.TransactionResult, 0, 10) - for i := 0; i < 10; i++ { - txID := unittest.IdentifierFixture() - expected := flow.TransactionResult{ - TransactionID: txID, - ErrorMessage: fmt.Sprintf("a runtime error %d", i), - } - txResults = append(txResults, expected) - } - - chained := NewTransactionResults(presults, bresults) - - // not found - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - require.Len(t, actual, 0) - - // only stored in first - require.NoError(t, pebbleimpl.ToDB(pdb).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return presults.BatchStore(blockID, txResults, rw) - })) - actual, err = chained.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, txResults, actual) - }) -} - -func TestTxResultsOnlySecondHave(t *testing.T) { - unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { - bresults := store.NewTransactionResults(metrics.NewNoopCollector(), badgerimpl.ToDB(bdb), 1) - presults := store.NewTransactionResults(metrics.NewNoopCollector(), pebbleimpl.ToDB(pdb), 1) - - blockID := unittest.IdentifierFixture() - txResults := make([]flow.TransactionResult, 0, 10) - for i := 0; i < 10; i++ { - txID := unittest.IdentifierFixture() - expected := flow.TransactionResult{ - TransactionID: txID, - ErrorMessage: fmt.Sprintf("a runtime error %d", i), - } - txResults = append(txResults, expected) - } - - chained := NewTransactionResults(presults, bresults) - - // not found - actual, err := chained.ByBlockID(blockID) - require.NoError(t, err) - require.Len(t, actual, 0) - - // only stored in second - require.NoError(t, badgerimpl.ToDB(bdb).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return bresults.BatchStore(blockID, txResults, rw) - })) - actual, err = chained.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, txResults, actual) - }) -} diff --git a/storage/store/chunk_data_packs_test.go b/storage/store/chunk_data_packs_test.go index 829ec0356b4..65ff58d78f7 100644 --- a/storage/store/chunk_data_packs_test.go +++ b/storage/store/chunk_data_packs_test.go @@ -3,13 +3,11 @@ package store_test import ( "testing" - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - badgerstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" @@ -21,71 +19,66 @@ import ( // TestChunkDataPacks_Store evaluates correct storage and retrieval of chunk data packs in the storage. // It also evaluates that re-inserting is idempotent. 
func TestChunkDataPacks_Store(t *testing.T) { - WithChunkDataPacks(t, 100, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, _ *badger.DB, _ *pebble.DB) { + WithChunkDataPacks(t, 100, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, _ *pebble.DB) { require.NoError(t, chunkDataPackStore.Store(chunkDataPacks)) require.NoError(t, chunkDataPackStore.Store(chunkDataPacks)) }) } func TestChunkDataPack_Remove(t *testing.T) { - unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { - unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { - // TODO: once transactions and collections are refactored to use the same storage interface, - // we can use the same storage.DB for both - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, bdb) - collections := badgerstorage.NewCollections(bdb, transactions) - // keep the cache size at 1 to make sure that entries are written and read from storage itself. - chunkDataPackStore := store.NewChunkDataPacks(&metrics.NoopCollector{}, pebbleimpl.ToDB(pdb), collections, 1) - - chunkDataPacks := unittest.ChunkDataPacksFixture(10) - for _, chunkDataPack := range chunkDataPacks { - // store1s collection in Collections storage (which ChunkDataPacks store uses internally) - err := collections.Store(chunkDataPack.Collection) - require.NoError(t, err) - } - - chunkIDs := make([]flow.Identifier, 0, len(chunkDataPacks)) - for _, chunk := range chunkDataPacks { - chunkIDs = append(chunkIDs, chunk.ID()) - } - - require.NoError(t, chunkDataPackStore.Store(chunkDataPacks)) - require.NoError(t, chunkDataPackStore.Remove(chunkIDs)) - - // verify it has been removed - _, err := chunkDataPackStore.ByChunkID(chunkIDs[0]) - assert.ErrorIs(t, err, storage.ErrNotFound) - - // Removing again should not error - require.NoError(t, chunkDataPackStore.Remove(chunkIDs)) - }) + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + transactions := store.NewTransactions(&metrics.NoopCollector{}, db) + collections := store.NewCollections(db, transactions) + // keep the cache size at 1 to make sure that entries are written and read from storage itself. + chunkDataPackStore := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) + + chunkDataPacks := unittest.ChunkDataPacksFixture(10) + for _, chunkDataPack := range chunkDataPacks { + // store collection in Collections storage (which ChunkDataPacks store uses internally) + _, err := collections.Store(chunkDataPack.Collection) + require.NoError(t, err) + } + + chunkIDs := make([]flow.Identifier, 0, len(chunkDataPacks)) + for _, chunk := range chunkDataPacks { + chunkIDs = append(chunkIDs, chunk.ID()) + } + + require.NoError(t, chunkDataPackStore.Store(chunkDataPacks)) + require.NoError(t, chunkDataPackStore.Remove(chunkIDs)) + + // verify it has been removed + _, err := chunkDataPackStore.ByChunkID(chunkIDs[0]) + assert.ErrorIs(t, err, storage.ErrNotFound) + + // Removing again should not error + require.NoError(t, chunkDataPackStore.Remove(chunkIDs)) }) } // TestChunkDataPacks_MissingItem evaluates querying a missing item returns a storage.ErrNotFound error. 
func TestChunkDataPacks_MissingItem(t *testing.T) { - unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { - unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { - // TODO: once transactions and collections are refactored to use the same storage interface, - // we can use the same storage.DB for both - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, bdb) - collections := badgerstorage.NewCollections(bdb, transactions) - store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, pebbleimpl.ToDB(pdb), collections, 1) - - // attempt to get an invalid - _, err := store1.ByChunkID(unittest.IdentifierFixture()) - assert.ErrorIs(t, err, storage.ErrNotFound) - }) + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + transactions := store.NewTransactions(&metrics.NoopCollector{}, db) + collections := store.NewCollections(db, transactions) + store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) + + // attempt to get an invalid + _, err := store1.ByChunkID(unittest.IdentifierFixture()) + assert.ErrorIs(t, err, storage.ErrNotFound) }) } // TestChunkDataPacks_StoreTwice evaluates that storing the same chunk data pack twice // does not result in an error. func TestChunkDataPacks_StoreTwice(t *testing.T) { - WithChunkDataPacks(t, 2, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, bdb *badger.DB, pdb *pebble.DB) { - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, bdb) - collections := badgerstorage.NewCollections(bdb, transactions) - store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, pebbleimpl.ToDB(pdb), collections, 1) + WithChunkDataPacks(t, 2, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + transactions := store.NewTransactions(&metrics.NoopCollector{}, db) + collections := store.NewCollections(db, transactions) + store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) require.NoError(t, store1.Store(chunkDataPacks)) for _, c := range chunkDataPacks { @@ -100,33 +93,30 @@ func TestChunkDataPacks_StoreTwice(t *testing.T) { // WithChunkDataPacks is a test helper that generates specified number of chunk data packs, store1 them using the storeFunc, and // then evaluates whether they are successfully retrieved from storage. -func WithChunkDataPacks(t *testing.T, chunks int, storeFunc func(*testing.T, []*flow.ChunkDataPack, *store.ChunkDataPacks, *badger.DB, *pebble.DB)) { - unittest.RunWithBadgerDB(t, func(bdb *badger.DB) { - unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { - // TODO: once transactions and collections are refactored to use the same storage interface, - // we can use the same storage.DB for both - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, bdb) - collections := badgerstorage.NewCollections(bdb, transactions) - // keep the cache size at 1 to make sure that entries are written and read from storage itself. - store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, pebbleimpl.ToDB(pdb), collections, 1) - - chunkDataPacks := unittest.ChunkDataPacksFixture(chunks) - for _, chunkDataPack := range chunkDataPacks { - // store1s collection in Collections storage (which ChunkDataPacks store uses internally) - err := collections.Store(chunkDataPack.Collection) - require.NoError(t, err) - } - - // store1s chunk data packs in the memory using provided store function. 
- storeFunc(t, chunkDataPacks, store1, bdb, pdb) - - // store1d chunk data packs should be retrieved successfully. - for _, expected := range chunkDataPacks { - actual, err := store1.ByChunkID(expected.ChunkID) - require.NoError(t, err) - - assert.Equal(t, expected, actual) - } - }) +func WithChunkDataPacks(t *testing.T, chunks int, storeFunc func(*testing.T, []*flow.ChunkDataPack, *store.ChunkDataPacks, *pebble.DB)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + transactions := store.NewTransactions(&metrics.NoopCollector{}, db) + collections := store.NewCollections(db, transactions) + // keep the cache size at 1 to make sure that entries are written and read from storage itself. + store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) + + chunkDataPacks := unittest.ChunkDataPacksFixture(chunks) + for _, chunkDataPack := range chunkDataPacks { + // store collection in Collections storage (which ChunkDataPacks store uses internally) + _, err := collections.Store(chunkDataPack.Collection) + require.NoError(t, err) + } + + // store chunk data packs in the memory using provided store function. + storeFunc(t, chunkDataPacks, store1, pdb) + + // store1d chunk data packs should be retrieved successfully. + for _, expected := range chunkDataPacks { + actual, err := store1.ByChunkID(expected.ChunkID) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + } }) } diff --git a/storage/store/chunks_queue.go b/storage/store/chunks_queue.go index 38427e7088f..33941b58c35 100644 --- a/storage/store/chunks_queue.go +++ b/storage/store/chunks_queue.go @@ -19,7 +19,8 @@ import ( type ChunksQueue struct { db storage.DB chunkLocatorCache *Cache[uint64, *chunks.Locator] // cache for chunk locators, indexed by job index - storing *sync.Mutex + // TODO(7355): lockctx + storing *sync.Mutex } const JobQueueChunksQueue = "JobQueueChunksQueue" diff --git a/storage/badger/cluster_blocks.go b/storage/store/cluster_blocks.go similarity index 55% rename from storage/badger/cluster_blocks.go rename to storage/store/cluster_blocks.go index 88aef54526f..59b4aad183e 100644 --- a/storage/badger/cluster_blocks.go +++ b/storage/store/cluster_blocks.go @@ -1,25 +1,23 @@ -package badger +package store import ( "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" ) // ClusterBlocks implements a simple block storage around a badger DB. 
type ClusterBlocks struct { - db *badger.DB + db storage.DB chainID flow.ChainID headers *Headers payloads *ClusterPayloads } -func NewClusterBlocks(db *badger.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks { +func NewClusterBlocks(db storage.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks { b := &ClusterBlocks{ db: db, chainID: chainID, @@ -29,24 +27,6 @@ func NewClusterBlocks(db *badger.DB, chainID flow.ChainID, headers *Headers, pay return b } -func (b *ClusterBlocks) Store(block *cluster.Block) error { - return operation.RetryOnConflictTx(b.db, transaction.Update, b.storeTx(block)) -} - -func (b *ClusterBlocks) storeTx(block *cluster.Block) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := b.headers.storeTx(block.Header)(tx) - if err != nil { - return fmt.Errorf("could not store header: %w", err) - } - err = b.payloads.storeTx(block.ID(), block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not store payload: %w", err) - } - return nil - } -} - func (b *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { header, err := b.headers.ByBlockID(blockID) if err != nil { @@ -65,7 +45,7 @@ func (b *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { func (b *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error) { var blockID flow.Identifier - err := b.db.View(operation.LookupClusterBlockHeight(b.chainID, height, &blockID)) + err := operation.LookupClusterBlockHeight(b.db.Reader(), b.chainID, height, &blockID) if err != nil { return nil, fmt.Errorf("could not look up block: %w", err) } diff --git a/storage/store/cluster_blocks_test.go b/storage/store/cluster_blocks_test.go new file mode 100644 index 00000000000..8071a36d69f --- /dev/null +++ b/storage/store/cluster_blocks_test.go @@ -0,0 +1,78 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestClusterBlocks tests inserting and querying a chain of cluster blocks. 
+func TestClusterBlocks(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + chain := unittest.ClusterBlockChainFixture(4) + parent, blocks := chain[0], chain[1:] + + // add parent and mark its height as the latest finalized block + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw.Writer(), parent.Header.ChainID, parent.Header.Height, parent.ID()) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertClusterFinalizedHeight(lctx, rw.Writer(), parent.Header.ChainID, parent.Header.Height) + }) + require.NoError(t, err) + lctx.Release() + + // store chain of descending blocks + for _, block := range blocks { + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterBlock(lctx2, rw, &block) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.FinalizeClusterBlock(lctx2, rw, block.Header.ID()) + }) + require.NoError(t, err) + lctx2.Release() + } + + clusterBlocks := NewClusterBlocks( + db, + blocks[0].Header.ChainID, + NewHeaders(metrics.NewNoopCollector(), db), + NewClusterPayloads(metrics.NewNoopCollector(), db), + ) + + t.Run("ByHeight", func(t *testing.T) { + // check if the block can be retrieved by height + for _, block := range blocks { + retrievedBlock, err := clusterBlocks.ByHeight(block.Header.Height) + require.NoError(t, err) + require.Equal(t, block.ID(), retrievedBlock.ID()) + } + }) + + t.Run("ByID", func(t *testing.T) { + // check if the block can be retrieved by ID + for _, block := range blocks { + retrievedBlock, err := clusterBlocks.ByID(block.ID()) + require.NoError(t, err) + require.Equal(t, block.ID(), retrievedBlock.ID()) + } + }) + }) +} diff --git a/storage/store/cluster_payloads.go b/storage/store/cluster_payloads.go new file mode 100644 index 00000000000..eef73366c7a --- /dev/null +++ b/storage/store/cluster_payloads.go @@ -0,0 +1,43 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/procedure" +) + +// ClusterPayloads implements storage of block payloads for collection node +// cluster consensus. 
+type ClusterPayloads struct { + db storage.DB + cache *Cache[flow.Identifier, *cluster.Payload] +} + +func NewClusterPayloads(cacheMetrics module.CacheMetrics, db storage.DB) *ClusterPayloads { + retrieve := func(r storage.Reader, blockID flow.Identifier) (*cluster.Payload, error) { + var payload cluster.Payload + err := procedure.RetrieveClusterPayload(r, blockID, &payload) + return &payload, err + } + + cp := &ClusterPayloads{ + db: db, + cache: newCache[flow.Identifier, *cluster.Payload](cacheMetrics, metrics.ResourceClusterPayload, + withLimit[flow.Identifier, *cluster.Payload](flow.DefaultTransactionExpiry*4), + withRetrieve(retrieve)), + } + return cp +} + +func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) { + val, err := cp.cache.Get(cp.db.Reader(), blockID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve cluster block payload: %w", err) + } + return val, nil +} diff --git a/storage/store/cluster_payloads_test.go b/storage/store/cluster_payloads_test.go new file mode 100644 index 00000000000..f63ab5599f6 --- /dev/null +++ b/storage/store/cluster_payloads_test.go @@ -0,0 +1,50 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/procedure" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestStoreRetrieveClusterPayload(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + payloads := store.NewClusterPayloads(metrics, db) + + blockID := unittest.IdentifierFixture() + expected := unittest.ClusterPayloadFixture(5) + + // store payload + manager := storage.NewTestingLockManager() + lctx := manager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return procedure.InsertClusterPayload(lctx, rw, blockID, expected) + }) + lctx.Release() + require.NoError(t, err) + + // fetch payload + payload, err := payloads.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, payload) + }) +} + +func TestClusterPayloadRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + payloads := store.NewClusterPayloads(metrics, db) + + _, err := payloads.ByBlockID(unittest.IdentifierFixture()) // attempt to retrieve block for random ID + assert.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/collections.go b/storage/store/collections.go index 53eaf96da88..e24fafdb3f7 100644 --- a/storage/store/collections.go +++ b/storage/store/collections.go @@ -3,19 +3,23 @@ package store import ( "errors" "fmt" - "sync" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog/log" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/utils/logging" ) type Collections struct { db storage.DB transactions *Transactions - indexingByTx sync.Mutex + + // TODO: Add caching -- this might be relatively frequently queried within the AN; + // likely predominantly with requests about recent transactions. + // Note that we already have caching for transactions. 
} var _ storage.Collections = (*Collections)(nil) @@ -25,26 +29,15 @@ func NewCollections(db storage.DB, transactions *Transactions) *Collections { c := &Collections{ db: db, transactions: transactions, - indexingByTx: sync.Mutex{}, } return c } -func (c *Collections) StoreLight(collection *flow.LightCollection) error { - return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := operation.UpsertCollection(rw.Writer(), collection) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - return nil - }) -} - // Store stores a collection in the database. // any error returned are exceptions -func (c *Collections) Store(collection *flow.Collection) error { - return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - light := collection.Light() +func (c *Collections) Store(collection *flow.Collection) (flow.LightCollection, error) { + light := collection.Light() + err := c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := operation.UpsertCollection(rw.Writer(), &light) if err != nil { return fmt.Errorf("could not insert collection: %w", err) @@ -59,9 +52,18 @@ func (c *Collections) Store(collection *flow.Collection) error { return nil }) + + if err != nil { + return flow.LightCollection{}, err + } + return light, nil } -// ByID retrieves a collection by its ID. +// ByID returns the collection with the given ID, including all +// transactions within the collection. +// +// Expected errors during normal operation: +// - `storage.ErrNotFound` if no light collection was found. func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error) { var ( light flow.LightCollection @@ -85,7 +87,11 @@ func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error) { return &collection, nil } -// LightByID retrieves a light collection by its ID. +// LightByID returns a reduced representation of the collection with the given ID. +// The reduced collection references the constituent transactions by their hashes. +// +// Expected errors during normal operation: +// - `storage.ErrNotFound` if no light collection was found. func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error) { var collection flow.LightCollection @@ -121,7 +127,7 @@ func (c *Collections) Remove(colID flow.Identifier) error { return fmt.Errorf("could not remove collection payload indices: %w", err) } - err = operation.RemoveTransaction(rw.Writer(), txID) + err = c.transactions.RemoveBatch(rw, txID) if err != nil { return fmt.Errorf("could not remove transaction: %w", err) } @@ -130,17 +136,23 @@ func (c *Collections) Remove(colID flow.Identifier) error { // remove the collection return operation.RemoveCollection(rw.Writer(), colID) }) - if err != nil { return fmt.Errorf("could not remove collection: %w", err) } return nil } -// StoreLightAndIndexByTransaction stores a light collection and indexes it by transaction ID. -// It's concurrent-safe. -// any error returned are exceptions -func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { +// BatchStoreAndIndexByTransaction stores a collection and indexes it by transaction ID within a batch. +// +// CAUTION: current approach is NOT BFT and needs to be revised in the future. +// Honest clusters ensure a transaction can only belong to one collection. 
However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// TODO: eventually we need to handle Byzantine clusters +// +// No errors are expected during normal operations +func (c *Collections) BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, rw storage.ReaderBatchWriter) (flow.LightCollection, error) { // - This lock is to ensure there is no race condition when indexing collection by transaction ID // - The access node uses this index to report the transaction status. It's done by first // find the collection for a given transaction ID, and then find the block by the collection, @@ -151,45 +163,82 @@ func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightColl // inconsistent. // - therefore, we need to check if the transaction is already indexed by a collection, and to // make sure there is no dirty read, we need to use a lock to protect the indexing operation. - // - Note, this approach works because this is the only place where UnsafeIndexCollectionByTransaction + // - Note, this approach works because this is the only place where [operation.IndexCollectionByTransaction] // is used in the code base to index collection by transaction. - collectionID := collection.ID() + if !lctx.HoldsLock(storage.LockInsertCollection) { + return flow.LightCollection{}, fmt.Errorf("missing lock: %v", storage.LockInsertCollection) + } - c.indexingByTx.Lock() - defer c.indexingByTx.Unlock() + light := collection.Light() + collectionID := light.ID() - return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := operation.UpsertCollection(rw.Writer(), collection) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } + err := operation.UpsertCollection(rw.Writer(), &light) + if err != nil { + return flow.LightCollection{}, fmt.Errorf("could not insert collection: %w", err) + } - for _, txID := range collection.Transactions { - var differentColTxIsIn flow.Identifier - err := operation.LookupCollectionByTransaction(rw.GlobalReader(), txID, &differentColTxIsIn) - if err == nil { - // collection nodes have ensured that a transaction can only belong to one collection - // so if transaction is already indexed by a collection, check if it's the same collection. - // TODO: For now we log a warning, but eventually we need to handle Byzantine clusters - if collectionID != differentColTxIsIn { - log.Error().Msgf("sanity check failed: transaction %v in collection %v is already indexed by a different collection %v", - txID, collectionID, differentColTxIsIn) - } - continue + for _, txID := range light.Transactions { + var differentColTxIsIn flow.Identifier + // The following is not BFT, because we can't handle the case where a transaction is included + // in multiple collections. As long as we only have significantly less than 1/3 byzantine + // collectors in the overall population (across all clusters) this should not happen. + // TODO: For now we log a warning, but eventually we need to handle Byzantine clusters + err := operation.LookupCollectionByTransaction(rw.GlobalReader(), txID, &differentColTxIsIn) + if err == nil { + if collectionID != differentColTxIsIn { + log.Error(). + Str(logging.KeySuspicious, "true"). + Hex("transaction hash", txID[:]). + Hex("previously persisted collection containing transactions", differentColTxIsIn[:]). 
+ Hex("newly encountered collection containing transactions", collectionID[:]). + Msgf("sanity check failed: transaction contained in multiple collections -- this is a symptom of a byzantine collector cluster (or a bug)") } + continue + } + err = operation.IndexCollectionByTransaction(lctx, rw.Writer(), txID, collectionID) + if err != nil { + return flow.LightCollection{}, fmt.Errorf("could not insert transaction ID: %w", err) + } + } - // the indexingByTx lock has ensured we are the only process indexing collection by transaction - err = operation.UnsafeIndexCollectionByTransaction(rw.Writer(), txID, collectionID) - if err != nil { - return fmt.Errorf("could not insert transaction ID: %w", err) - } + // Store individual transactions + for _, tx := range collection.Transactions { + err = c.transactions.storeTx(rw, tx) + if err != nil { + return flow.LightCollection{}, fmt.Errorf("could not insert transaction: %w", err) } + } - return nil + return light, nil +} + +// StoreAndIndexByTransaction stores a collection and indexes it by transaction ID. +// It's concurrent-safe. +// +// CAUTION: current approach is NOT BFT and needs to be revised in the future. +// Honest clusters ensure a transaction can only belong to one collection. However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// TODO: eventually we need to handle Byzantine clusters +// +// No errors are expected during normal operation. +func (c *Collections) StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (flow.LightCollection, error) { + var light flow.LightCollection + err := c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + var err error + light, err = c.BatchStoreAndIndexByTransaction(lctx, collection, rw) + return err }) + return light, err } -// LightByTransactionID retrieves a light collection by a transaction ID. +// LightByTransactionID returns a reduced representation of the collection +// holding the given transaction ID. The reduced collection references the +// constituent transactions by their hashes. +// +// Expected errors during normal operation: +// - `storage.ErrNotFound` if no light collection was found. 
func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { collID := &flow.Identifier{} err := operation.LookupCollectionByTransaction(c.db.Reader(), txID, collID) diff --git a/storage/store/collections_test.go b/storage/store/collections_test.go index 8318c16e53a..27d6b438d0a 100644 --- a/storage/store/collections_test.go +++ b/storage/store/collections_test.go @@ -17,16 +17,23 @@ import ( func TestCollections(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() transactions := store.NewTransactions(metrics, db) collections := store.NewCollections(db, transactions) - // create a light collection with three transactions - expected := unittest.CollectionFixture(3).Light() + // create a collection with three transactions + expected := unittest.CollectionFixture(3) - // store the light collection and the transaction index - err := collections.StoreLightAndIndexByTransaction(&expected) + // Create a lock manager and context for testing + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertCollection) + require.NoError(t, err) + defer lctx.Release() + + // store the collection and the transaction index + _, err = collections.StoreAndIndexByTransaction(lctx, &expected) require.NoError(t, err) // retrieve the light collection by collection id @@ -34,13 +41,14 @@ func TestCollections(t *testing.T) { require.NoError(t, err) // check if the light collection was indeed persisted - assert.Equal(t, &expected, actual) + expectedLight := expected.Light() + assert.Equal(t, &expectedLight, actual) expectedID := expected.ID() // retrieve the collection light id by each of its transaction id - for _, txID := range expected.Transactions { - collLight, err := collections.LightByTransactionID(txID) + for _, tx := range expected.Transactions { + collLight, err := collections.LightByTransactionID(tx.ID()) actualID := collLight.ID() // check that the collection id can indeed be retrieved by transaction id require.NoError(t, err) @@ -57,11 +65,11 @@ func TestCollections(t *testing.T) { // check that the collection was indeed removed from the transaction index for _, tx := range expected.Transactions { - _, err = collections.LightByTransactionID(tx) + _, err = collections.LightByTransactionID(tx.ID()) assert.Error(t, err) assert.ErrorIs(t, err, storage.ErrNotFound) - _, err = transactions.ByID(tx) + _, err = transactions.ByID(tx.ID()) assert.Error(t, err) assert.ErrorIs(t, err, storage.ErrNotFound) } @@ -72,6 +80,7 @@ func TestCollections(t *testing.T) { // indexed by the tx will be the one that is indexed in storage func TestCollections_IndexDuplicateTx(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() transactions := store.NewTransactions(metrics, db) collections := store.NewCollections(db, transactions) @@ -83,19 +92,24 @@ func TestCollections_IndexDuplicateTx(t *testing.T) { col2Tx := col2.Transactions[0] // transaction that's only in col2 col2.Transactions = append(col2.Transactions, dupTx) + // Create a lock manager and context for testing + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertCollection) + require.NoError(t, err) + defer lctx.Release() + // insert col1 - col1Light := col1.Light() - err := collections.StoreLightAndIndexByTransaction(&col1Light) + _, err = 
collections.StoreAndIndexByTransaction(lctx, &col1) require.NoError(t, err) // insert col2 - col2Light := col2.Light() - err = collections.StoreLightAndIndexByTransaction(&col2Light) + _, err = collections.StoreAndIndexByTransaction(lctx, &col2) require.NoError(t, err) // should be able to retrieve col2 by ID gotLightByCol2ID, err := collections.LightByID(col2.ID()) require.NoError(t, err) + col2Light := col2.Light() assert.Equal(t, &col2Light, gotLightByCol2ID) // should be able to retrieve col2 by the transaction which only appears in col2 @@ -106,6 +120,7 @@ func TestCollections_IndexDuplicateTx(t *testing.T) { // since col1 is the first collection to be indexed by the shared transaction (dupTx) gotLightByDupTxID, err := collections.LightByTransactionID(dupTx.ID()) require.NoError(t, err) + col1Light := col1.Light() assert.Equal(t, &col1Light, gotLightByDupTxID) }) } @@ -114,6 +129,7 @@ func TestCollections_IndexDuplicateTx(t *testing.T) { // different collection both will succeed, and one of the collection will be indexed by the tx func TestCollections_ConcurrentIndexByTx(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() transactions := store.NewTransactions(metrics, db) collections := store.NewCollections(db, transactions) @@ -127,6 +143,12 @@ func TestCollections_ConcurrentIndexByTx(t *testing.T) { sharedTx := col1.Transactions[0] // The shared transaction col2.Transactions[0] = sharedTx + // Create a lock manager and context for testing + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertCollection) + require.NoError(t, err) + defer lctx.Release() + var wg sync.WaitGroup errChan := make(chan error, 2*numCollections) @@ -137,8 +159,7 @@ func TestCollections_ConcurrentIndexByTx(t *testing.T) { for i := 0; i < numCollections; i++ { col := unittest.CollectionFixture(1) col.Transactions[0] = sharedTx // Ensure it shares the same transaction - light := col.Light() - err := collections.StoreLightAndIndexByTransaction(&light) + _, err := collections.StoreAndIndexByTransaction(lctx, &col) errChan <- err } }() @@ -150,8 +171,7 @@ func TestCollections_ConcurrentIndexByTx(t *testing.T) { for i := 0; i < numCollections; i++ { col := unittest.CollectionFixture(1) col.Transactions[0] = sharedTx // Ensure it shares the same transaction - light := col.Light() - err := collections.StoreLightAndIndexByTransaction(&light) + _, err := collections.StoreAndIndexByTransaction(lctx, &col) errChan <- err } }() diff --git a/storage/store/commits.go b/storage/store/commits.go index 5d6ee7c4a0f..19ca4e1a78b 100644 --- a/storage/store/commits.go +++ b/storage/store/commits.go @@ -1,6 +1,8 @@ package store import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -13,11 +15,9 @@ type Commits struct { cache *Cache[flow.Identifier, flow.StateCommitment] } -func NewCommits(collector module.CacheMetrics, db storage.DB) *Commits { +var _ storage.Commits = (*Commits)(nil) - store := func(rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { - return operation.IndexStateCommitment(rw.Writer(), blockID, commit) - } +func NewCommits(collector module.CacheMetrics, db storage.DB) *Commits { retrieve := func(r storage.Reader, blockID flow.Identifier) (flow.StateCommitment, error) { var commit flow.StateCommitment @@ -33,7 +33,6 @@ func 
NewCommits(collector module.CacheMetrics, db storage.DB) *Commits { db: db, cache: newCache(collector, metrics.ResourceCommit, withLimit[flow.Identifier, flow.StateCommitment](1000), - withStore(store), withRetrieve(retrieve), withRemove[flow.Identifier, flow.StateCommitment](remove), ), @@ -42,10 +41,6 @@ func NewCommits(collector module.CacheMetrics, db storage.DB) *Commits { return c } -func (c *Commits) storeTx(rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { - return c.cache.PutTx(rw, blockID, commit) -} - func (c *Commits) retrieveTx(r storage.Reader, blockID flow.Identifier) (flow.StateCommitment, error) { val, err := c.cache.Get(r, blockID) if err != nil { @@ -54,19 +49,13 @@ func (c *Commits) retrieveTx(r storage.Reader, blockID flow.Identifier) (flow.St return val, nil } -func (c *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error { - return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return c.storeTx(rw, blockID, commit) - }) -} - // BatchStore stores Commit keyed by blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (c *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, rw storage.ReaderBatchWriter) error { +// No errors are expected during normal operation. +// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. +func (c *Commits) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, rw storage.ReaderBatchWriter) error { // we can't cache while using batches, as it's unknown at this point when, and if // the batch will be committed. Cache will be populated on read however. - return operation.IndexStateCommitment(rw.Writer(), blockID, commit) + return operation.IndexStateCommitment(lctx, rw, blockID, commit) } func (c *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { @@ -81,7 +70,7 @@ func (c *Commits) RemoveByBlockID(blockID flow.Identifier) error { // BatchRemoveByBlockID removes Commit keyed by blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. +// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
func (c *Commits) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { return c.cache.RemoveTx(rw, blockID) } diff --git a/storage/store/commits_test.go b/storage/store/commits_test.go index ce0d9d56434..d30ffbe38ef 100644 --- a/storage/store/commits_test.go +++ b/storage/store/commits_test.go @@ -18,6 +18,7 @@ import ( // TestCommitsStoreAndRetrieve tests that a commit can be store1d, retrieved and attempted to be stored again without an error func TestCommitsStoreAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() store1 := store.NewCommits(metrics, db) @@ -25,11 +26,16 @@ func TestCommitsStoreAndRetrieve(t *testing.T) { _, err := store1.ByBlockID(unittest.IdentifierFixture()) assert.ErrorIs(t, err, storage.ErrNotFound) - // store1 a commit in db + // store a commit in db blockID := unittest.IdentifierFixture() expected := unittest.StateCommitmentFixture() - err = store1.Store(blockID, expected) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchStore(lctx, blockID, expected, rw) + }) require.NoError(t, err) + lctx.Release() // retrieve the commit by ID actual, err := store1.ByBlockID(blockID) @@ -37,20 +43,31 @@ func TestCommitsStoreAndRetrieve(t *testing.T) { assert.Equal(t, expected, actual) // re-insert the commit - should be idempotent - err = store1.Store(blockID, expected) + lctx = lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store1.BatchStore(lctx, blockID, expected, rw) + }) require.NoError(t, err) + lctx.Release() }) } func TestCommitStoreAndRemove(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() store := store.NewCommits(metrics, db) // Create and store a commit blockID := unittest.IdentifierFixture() expected := unittest.StateCommitmentFixture() - err := store.Store(blockID, expected) + lctx := lockManager.NewContext() + defer lctx.Release() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) + return store.BatchStore(lctx, blockID, expected, rw) + }) require.NoError(t, err) // Ensure it exists diff --git a/storage/store/consumer_progress.go b/storage/store/consumer_progress.go index aaa331b6173..f8bc131fb2c 100644 --- a/storage/store/consumer_progress.go +++ b/storage/store/consumer_progress.go @@ -12,6 +12,7 @@ import ( // ConsumerProgressInitializer is a helper to initialize the consumer progress index in storage // It prevents the consumer from being used before initialization type ConsumerProgressInitializer struct { + // TODO(7355): lockctx initing sync.Mutex progress *consumerProgress } @@ -62,7 +63,7 @@ func newConsumerProgress(db storage.DB, consumer string) *consumerProgress { } // ProcessedIndex returns the processed index for the consumer -// any error would be exception +// No errors are expected during normal operation func (cp *consumerProgress) ProcessedIndex() (uint64, error) { var processed uint64 err := operation.RetrieveProcessedIndex(cp.db.Reader(), cp.consumer, &processed) @@ -73,8 +74,8 @@ func (cp *consumerProgress) 
ProcessedIndex() (uint64, error) { } // SetProcessedIndex updates the processed index for the consumer -// any error would be exception // The caller must use ConsumerProgressInitializer to initialize the progress index in storage +// No errors are expected during normal operation func (cp *consumerProgress) SetProcessedIndex(processed uint64) error { err := cp.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.SetProcessedIndex(rw.Writer(), cp.consumer, processed) @@ -85,3 +86,15 @@ func (cp *consumerProgress) SetProcessedIndex(processed uint64) error { return nil } + +// BatchSetProcessedIndex updates the processed index for the consumer within a batch operation +// The caller must use ConsumerProgressInitializer to initialize the progress index in storage +// No errors are expected during normal operation +func (cp *consumerProgress) BatchSetProcessedIndex(processed uint64, batch storage.ReaderBatchWriter) error { + err := operation.SetProcessedIndex(batch.Writer(), cp.consumer, processed) + if err != nil { + return fmt.Errorf("could not add processed index update to batch: %w", err) + } + + return nil +} diff --git a/storage/store/consumer_progress_test.go b/storage/store/consumer_progress_test.go index 1fc70335bf2..fa5171959b5 100644 --- a/storage/store/consumer_progress_test.go +++ b/storage/store/consumer_progress_test.go @@ -12,10 +12,8 @@ import ( func TestConsumerProgressInitializer(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - const testConsumer = "test_consumer" - t.Run("Initialize with default index", func(t *testing.T) { - cpi := NewConsumerProgress(db, testConsumer) + cpi := NewConsumerProgress(db, "test_consumer1") progress, err := cpi.Initialize(100) require.NoError(t, err) @@ -25,7 +23,7 @@ func TestConsumerProgressInitializer(t *testing.T) { }) t.Run("Initialize when already initialized", func(t *testing.T) { - cpi := NewConsumerProgress(db, testConsumer) + cpi := NewConsumerProgress(db, "test_consumer2") // First initialization _, err := cpi.Initialize(100) @@ -42,7 +40,7 @@ func TestConsumerProgressInitializer(t *testing.T) { }) t.Run("SetProcessedIndex and ProcessedIndex", func(t *testing.T) { - cpi := NewConsumerProgress(db, testConsumer) + cpi := NewConsumerProgress(db, "test_consumer3") progress, err := cpi.Initialize(100) require.NoError(t, err) @@ -53,6 +51,33 @@ func TestConsumerProgressInitializer(t *testing.T) { require.NoError(t, err) assert.Equal(t, uint64(150), index) }) + }) +} + +func TestConsumerProgressBatchSet(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("BatchSetProcessedIndex and ProcessedIndex", func(t *testing.T) { + cpi := NewConsumerProgress(db, "test_consumer") + progress, err := cpi.Initialize(100) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(r storage.ReaderBatchWriter) error { + err := progress.BatchSetProcessedIndex(150, r) + require.NoError(t, err) + // Verify the index is not set until batch is committed + index, err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(100), index) + + return nil + }) + require.NoError(t, err) + + // Verify the index was updated after batch commit + index, err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(150), index) + }) }) } diff --git a/storage/store/epoch_commits.go b/storage/store/epoch_commits.go new file mode 100644 index 00000000000..bb7256bf767 --- /dev/null +++ b/storage/store/epoch_commits.go @@ -0,0 +1,58 @@ 
+package store
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+type EpochCommits struct {
+	db    storage.DB
+	cache *Cache[flow.Identifier, *flow.EpochCommit]
+}
+
+func NewEpochCommits(collector module.CacheMetrics, db storage.DB) *EpochCommits {
+
+	store := func(rw storage.ReaderBatchWriter, id flow.Identifier, commit *flow.EpochCommit) error {
+		return operation.InsertEpochCommit(rw.Writer(), id, commit)
+	}
+
+	retrieve := func(r storage.Reader, id flow.Identifier) (*flow.EpochCommit, error) {
+		var commit flow.EpochCommit
+		err := operation.RetrieveEpochCommit(r, id, &commit)
+		return &commit, err
+	}
+
+	ec := &EpochCommits{
+		db: db,
+		cache: newCache[flow.Identifier, *flow.EpochCommit](collector, metrics.ResourceEpochCommit,
+			withLimit[flow.Identifier, *flow.EpochCommit](4*flow.DefaultTransactionExpiry),
+			withStore(store),
+			withRetrieve(retrieve)),
+	}
+
+	return ec
+}
+
+func (ec *EpochCommits) BatchStore(rw storage.ReaderBatchWriter, commit *flow.EpochCommit) error {
+	return ec.cache.PutTx(rw, commit.ID(), commit)
+}
+
+func (ec *EpochCommits) retrieveTx(commitID flow.Identifier) (*flow.EpochCommit, error) {
+	val, err := ec.cache.Get(ec.db.Reader(), commitID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve EpochCommit event with id %x: %w", commitID, err)
+	}
+	return val, nil
+}
+
+// ByID will return the EpochCommit event by its ID.
+// Error returns:
+// * storage.ErrNotFound if no EpochCommit with the ID exists
+func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error) {
+	return ec.retrieveTx(commitID)
+}
diff --git a/storage/store/epoch_commits_test.go b/storage/store/epoch_commits_test.go
new file mode 100644
index 00000000000..eddb2e72904
--- /dev/null
+++ b/storage/store/epoch_commits_test.go
@@ -0,0 +1,44 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestEpochCommitStoreAndRetrieve tests that a commit can be stored, retrieved and attempted to be stored again without an error
+func TestEpochCommitStoreAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		s := store.NewEpochCommits(metrics, db)
+
+		// attempt to get an invalid commit
+		_, err := s.ByID(unittest.IdentifierFixture())
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+
+		// store a commit in db
+		expected := unittest.EpochCommitFixture()
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw, expected)
+		})
+		require.NoError(t, err)
+
+		// retrieve the commit by ID
+		actual, err := s.ByID(expected.ID())
+		require.NoError(t, err)
+		assert.Equal(t, expected, actual)
+
+		// test storing same epoch commit
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw, expected)
+		})
+		require.NoError(t, err)
+	})
+}
diff --git a/storage/badger/epoch_protocol_state.go b/storage/store/epoch_protocol_state.go
similarity index 72%
rename from storage/badger/epoch_protocol_state.go
rename to storage/store/epoch_protocol_state.go
index 717508158b7..df5d10921a1 100644 --- a/storage/badger/epoch_protocol_state.go +++ b/storage/store/epoch_protocol_state.go @@ -1,17 +1,14 @@ -package badger +package store import ( "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/operation" ) // DefaultEpochProtocolStateCacheSize is the default size for primary epoch protocol state entry cache. @@ -27,7 +24,7 @@ var DefaultProtocolStateIndexCacheSize uint = 1000 // sub-states of the overall of the overall Protocol State (KV Store). It uses an embedded cache // which is populated on first retrieval to speed up access to frequently used epoch sub-state. type EpochProtocolStateEntries struct { - db *badger.DB + db storage.DB // cache is essentially an in-memory map from `MinEpochStateEntry.ID()` -> `RichEpochStateEntry` // We do _not_ populate this cache which holds the RichEpochStateEntry's on store. This is because @@ -67,82 +64,77 @@ var _ storage.EpochProtocolStateEntries = (*EpochProtocolStateEntries)(nil) func NewEpochProtocolStateEntries(collector module.CacheMetrics, epochSetups storage.EpochSetups, epochCommits storage.EpochCommits, - db *badger.DB, + db storage.DB, stateCacheSize uint, stateByBlockIDCacheSize uint, ) *EpochProtocolStateEntries { - retrieveByEntryID := func(epochProtocolStateEntryID flow.Identifier) func(tx *badger.Txn) (*flow.RichEpochStateEntry, error) { + retrieveByEntryID := func(r storage.Reader, epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error) { var entry flow.MinEpochStateEntry - return func(tx *badger.Txn) (*flow.RichEpochStateEntry, error) { - err := operation.RetrieveEpochProtocolState(epochProtocolStateEntryID, &entry)(tx) - if err != nil { - return nil, err - } - result, err := newRichEpochProtocolStateEntry(&entry, epochSetups, epochCommits) - if err != nil { - return nil, fmt.Errorf("could not create RichEpochStateEntry: %w", err) - } - return result, nil + err := operation.RetrieveEpochProtocolState(r, epochProtocolStateEntryID, &entry) + if err != nil { + return nil, err + } + result, err := newRichEpochProtocolStateEntry(&entry, epochSetups, epochCommits) + if err != nil { + return nil, fmt.Errorf("could not create RichEpochStateEntry: %w", err) } + return result, nil } - storeByBlockID := func(blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := transaction.WithTx(operation.IndexEpochProtocolState(blockID, epochProtocolStateEntryID))(tx) - if err != nil { - return fmt.Errorf("could not index EpochProtocolState for block (%x): %w", blockID[:], err) - } - return nil + storeByBlockID := func(rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + err := operation.IndexEpochProtocolState(rw.Writer(), blockID, epochProtocolStateEntryID) + if err != nil { + return fmt.Errorf("could not index EpochProtocolState for block (%x): %w", blockID[:], err) } + return nil } - retrieveByBlockID := func(blockID flow.Identifier) func(tx *badger.Txn) (flow.Identifier, error) { - return func(tx *badger.Txn) (flow.Identifier, error) { - var entryID flow.Identifier - err := 
operation.LookupEpochProtocolState(blockID, &entryID)(tx) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not lookup epoch protocol state entry ID for block (%x): %w", blockID[:], err) - } - return entryID, nil + retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { + var entryID flow.Identifier + err := operation.LookupEpochProtocolState(r, blockID, &entryID) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup epoch protocol state entry ID for block (%x): %w", blockID[:], err) } + return entryID, nil } return &EpochProtocolStateEntries{ db: db, - cache: newCache[flow.Identifier, *flow.RichEpochStateEntry](collector, metrics.ResourceProtocolState, + cache: newCache(collector, metrics.ResourceProtocolState, withLimit[flow.Identifier, *flow.RichEpochStateEntry](stateCacheSize), withStore(noopStore[flow.Identifier, *flow.RichEpochStateEntry]), withRetrieve(retrieveByEntryID)), - byBlockIdCache: newCache[flow.Identifier, flow.Identifier](collector, metrics.ResourceProtocolStateByBlockID, + byBlockIdCache: newCache(collector, metrics.ResourceProtocolStateByBlockID, withLimit[flow.Identifier, flow.Identifier](stateByBlockIDCacheSize), withStore(storeByBlockID), withRetrieve(retrieveByBlockID)), } } -// StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), -// which persists the given epoch protocol state entry as part of a DB tx. Per convention, the identities in +// BatchStore persists the given epoch protocol state entry as part of a DB batch. Per convention, the identities in // the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), // otherwise an exception is returned. -// Expected errors of the returned anonymous function: -// - storage.ErrAlreadyExists if a state entry with the given id is already stored -func (s *EpochProtocolStateEntries) StoreTx(epochProtocolStateEntryID flow.Identifier, epochStateEntry *flow.MinEpochStateEntry) func(*transaction.Tx) error { - // front-load sanity checks: +// No errors are expected during normal operation. +func (s *EpochProtocolStateEntries) BatchStore( + w storage.Writer, + epochProtocolStateEntryID flow.Identifier, + epochStateEntry *flow.MinEpochStateEntry, +) error { + // sanity checks: if !epochStateEntry.CurrentEpoch.ActiveIdentities.Sorted(flow.IdentifierCanonical) { - return transaction.Fail(fmt.Errorf("sanity check failed: identities are not sorted")) + return fmt.Errorf("sanity check failed: identities are not sorted") } if epochStateEntry.NextEpoch != nil && !epochStateEntry.NextEpoch.ActiveIdentities.Sorted(flow.IdentifierCanonical) { - return transaction.Fail(fmt.Errorf("sanity check failed: next epoch identities are not sorted")) + return fmt.Errorf("sanity check failed: next epoch identities are not sorted") } - // happy path: return anonymous function, whose future execution (as part of a transaction) will store the state entry. - return transaction.WithTx(operation.InsertEpochProtocolState(epochProtocolStateEntryID, epochStateEntry)) + // happy path: add storage operation of the state entry to the batch + return operation.InsertEpochProtocolState(w, epochProtocolStateEntryID, epochStateEntry) } -// Index returns an anonymous function that is intended to be executed as part of a database transaction. +// BatchIndex persists the specific map entry in the node's database. 
// In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the // block that _proposes_ the referenced epoch protocol state entry. -// Upon call, the anonymous function persists the specific map entry in the node's database. // Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, // the protocol state changes if we seal some execution results emitting service events. @@ -151,19 +143,16 @@ func (s *EpochProtocolStateEntries) StoreTx(epochProtocolStateEntryID flow.Ident // - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, // _after_ validating the QC. // -// Expected errors during normal operations: -// - storage.ErrAlreadyExists if a state entry for the given blockID has already been indexed -func (s *EpochProtocolStateEntries) Index(blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) func(*transaction.Tx) error { - return s.byBlockIdCache.PutTx(blockID, epochProtocolStateEntryID) +// No errors are expected during normal operation. +func (s *EpochProtocolStateEntries) BatchIndex(rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + return s.byBlockIdCache.PutTx(rw, blockID, epochProtocolStateEntryID) } // ByID returns the epoch protocol state entry by its ID. // Expected errors during normal operations: // - storage.ErrNotFound if no protocol state with the given Identifier is known. func (s *EpochProtocolStateEntries) ByID(epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - return s.cache.Get(epochProtocolStateEntryID)(tx) + return s.cache.Get(s.db.Reader(), epochProtocolStateEntryID) } // ByBlockID retrieves the epoch protocol state entry that the block with the given ID proposes. @@ -179,13 +168,11 @@ func (s *EpochProtocolStateEntries) ByID(epochProtocolStateEntryID flow.Identifi // Expected errors during normal operations: // - storage.ErrNotFound if no state entry has been indexed for the given block. func (s *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - epochProtocolStateEntryID, err := s.byBlockIdCache.Get(blockID)(tx) + epochProtocolStateEntryID, err := s.byBlockIdCache.Get(s.db.Reader(), blockID) if err != nil { return nil, fmt.Errorf("could not lookup epoch protocol state ID for block (%x): %w", blockID[:], err) } - return s.cache.Get(epochProtocolStateEntryID)(tx) + return s.cache.Get(s.db.Reader(), epochProtocolStateEntryID) } // newRichEpochProtocolStateEntry constructs a RichEpochStateEntry from an epoch sub-state entry. 
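A minimal caller-side sketch of how the two batch operations above compose (illustrative only; `storeAndIndexEpochState` is a hypothetical helper, assuming the `storage.DB`, `EpochProtocolStateEntries`, and `flow` types shown in this diff, with the entry's identities already in canonical order — the same pattern the tests below exercise):

```go
// storeAndIndexEpochState sketches how a caller might persist an epoch protocol state
// entry and index it by its proposing block within one atomic batch. Hypothetical helper,
// not part of the diff. No errors are expected during normal operation.
func storeAndIndexEpochState(
	db storage.DB,
	epochStates *EpochProtocolStateEntries,
	blockID flow.Identifier,
	entryID flow.Identifier,
	entry *flow.MinEpochStateEntry,
) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		// persist the entry itself; identities must already be in canonical order
		if err := epochStates.BatchStore(rw.Writer(), entryID, entry); err != nil {
			return fmt.Errorf("could not store epoch protocol state entry: %w", err)
		}
		// index the entry by the block that proposes it
		return epochStates.BatchIndex(rw, blockID, entryID)
	})
}
```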
@@ -239,13 +226,17 @@ func newRichEpochProtocolStateEntry(
 		}
 	}
 
-	epochStateEntry, err := flow.NewEpochStateEntry(minEpochStateEntry,
-		previousEpochSetup,
-		previousEpochCommit,
-		currentEpochSetup,
-		currentEpochCommit,
-		nextEpochSetup,
-		nextEpochCommit)
+	epochStateEntry, err := flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  minEpochStateEntry,
+			PreviousEpochSetup:  previousEpochSetup,
+			PreviousEpochCommit: previousEpochCommit,
+			CurrentEpochSetup:   currentEpochSetup,
+			CurrentEpochCommit:  currentEpochCommit,
+			NextEpochSetup:      nextEpochSetup,
+			NextEpochCommit:     nextEpochCommit,
+		},
+	)
 	if err != nil {
 		// observing an error here would be an indication of severe data corruption or bug in our code since
 		// all data should be available and correctly structured at this point.
diff --git a/storage/badger/epoch_protocol_state_test.go b/storage/store/epoch_protocol_state_test.go
similarity index 81%
rename from storage/badger/epoch_protocol_state_test.go
rename to storage/store/epoch_protocol_state_test.go
index 9fcef3b2c49..4175f7faaf2 100644
--- a/storage/badger/epoch_protocol_state_test.go
+++ b/storage/store/epoch_protocol_state_test.go
@@ -1,63 +1,63 @@
-package badger
+package store
 
 import (
 	"testing"
 
-	"github.com/dgraph-io/badger/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/flow/mapfunc"
 	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/storage/badger/transaction"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
 	"github.com/onflow/flow-go/utils/unittest"
)
 
-// TestProtocolStateStorage tests if the protocol state is stored, retrieved and indexed correctly
+// TestProtocolStateStorage tests that the protocol state is stored, retrieved and indexed correctly
 func TestProtocolStateStorage(t *testing.T) {
-	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
 		metrics := metrics.NewNoopCollector()
 		setups := NewEpochSetups(metrics, db)
 		commits := NewEpochCommits(metrics, db)
-		store := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)
+		s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)
 
 		expected := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
 		protocolStateID := expected.ID()
 		blockID := unittest.IdentifierFixture()
 
 		// store protocol state and auxiliary info
-		err := transaction.Update(db, func(tx *transaction.Tx) error {
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 			// store epoch events to be able to retrieve them later
-			err := setups.StoreTx(expected.PreviousEpochSetup)(tx)
+			err := setups.BatchStore(rw, expected.PreviousEpochSetup)
 			require.NoError(t, err)
-			err = setups.StoreTx(expected.CurrentEpochSetup)(tx)
+			err = setups.BatchStore(rw, expected.CurrentEpochSetup)
 			require.NoError(t, err)
-			err = setups.StoreTx(expected.NextEpochSetup)(tx)
+			err = setups.BatchStore(rw, expected.NextEpochSetup)
 			require.NoError(t, err)
-			err = commits.StoreTx(expected.PreviousEpochCommit)(tx)
+			err = commits.BatchStore(rw, expected.PreviousEpochCommit)
 			require.NoError(t, err)
-			err = commits.StoreTx(expected.CurrentEpochCommit)(tx)
+			err = commits.BatchStore(rw, expected.CurrentEpochCommit)
 			require.NoError(t, err)
-			err = 
commits.StoreTx(expected.NextEpochCommit)(tx) + err = commits.BatchStore(rw, expected.NextEpochCommit) require.NoError(t, err) - err = store.StoreTx(protocolStateID, expected.MinEpochStateEntry)(tx) + err = s.BatchStore(rw.Writer(), protocolStateID, expected.MinEpochStateEntry) require.NoError(t, err) - return store.Index(blockID, protocolStateID)(tx) + return s.BatchIndex(rw, blockID, protocolStateID) }) require.NoError(t, err) // fetch protocol state - actual, err := store.ByID(protocolStateID) + actual, err := s.ByID(protocolStateID) require.NoError(t, err) require.Equal(t, expected, actual) assertRichProtocolStateValidity(t, actual) // fetch protocol state by block ID - actualByBlockID, err := store.ByBlockID(blockID) + actualByBlockID, err := s.ByBlockID(blockID) require.NoError(t, err) require.Equal(t, expected, actualByBlockID) @@ -68,23 +68,27 @@ func TestProtocolStateStorage(t *testing.T) { // TestProtocolStateStoreInvalidProtocolState tests that storing protocol state which has unsorted identities fails for // current and next epoch protocol states. func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() setups := NewEpochSetups(metrics, db) commits := NewEpochCommits(metrics, db) - store := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) invalid := unittest.EpochStateFixture().MinEpochStateEntry // swap first and second elements to break canonical order invalid.CurrentEpoch.ActiveIdentities[0], invalid.CurrentEpoch.ActiveIdentities[1] = invalid.CurrentEpoch.ActiveIdentities[1], invalid.CurrentEpoch.ActiveIdentities[0] - err := transaction.Update(db, store.StoreTx(invalid.ID(), invalid)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(rw.Writer(), invalid.ID(), invalid) + }) require.Error(t, err) invalid = unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()).MinEpochStateEntry // swap first and second elements to break canonical order invalid.NextEpoch.ActiveIdentities[0], invalid.NextEpoch.ActiveIdentities[1] = invalid.NextEpoch.ActiveIdentities[1], invalid.NextEpoch.ActiveIdentities[0] - err = transaction.Update(db, store.StoreTx(invalid.ID(), invalid)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(rw.Writer(), invalid.ID(), invalid) + }) require.Error(t, err) }) } @@ -93,12 +97,12 @@ func TestProtocolStateStoreInvalidProtocolState(t *testing.T) { // from current epoch and additionally add participants from previous epoch if they are not present in current epoch. // If the same participant is in the previous and current epochs, we should see it only once in the merged list and the dynamic portion has to be from current epoch. 
func TestProtocolStateMergeParticipants(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() setups := NewEpochSetups(metrics, db) commits := NewEpochCommits(metrics, db) - store := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) stateEntry := unittest.EpochStateFixture() // change address of participant in current epoch, so we can distinguish it from the one in previous epoch @@ -112,23 +116,23 @@ func TestProtocolStateMergeParticipants(t *testing.T) { protocolStateID := stateEntry.ID() // store protocol state and auxiliary info - err := transaction.Update(db, func(tx *transaction.Tx) error { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // store epoch events to be able to retrieve them later - err := setups.StoreTx(stateEntry.PreviousEpochSetup)(tx) + err := setups.BatchStore(rw, stateEntry.PreviousEpochSetup) require.NoError(t, err) - err = setups.StoreTx(stateEntry.CurrentEpochSetup)(tx) + err = setups.BatchStore(rw, stateEntry.CurrentEpochSetup) require.NoError(t, err) - err = commits.StoreTx(stateEntry.PreviousEpochCommit)(tx) + err = commits.BatchStore(rw, stateEntry.PreviousEpochCommit) require.NoError(t, err) - err = commits.StoreTx(stateEntry.CurrentEpochCommit)(tx) + err = commits.BatchStore(rw, stateEntry.CurrentEpochCommit) require.NoError(t, err) - return store.StoreTx(protocolStateID, stateEntry.MinEpochStateEntry)(tx) + return s.BatchStore(rw.Writer(), protocolStateID, stateEntry.MinEpochStateEntry) }) require.NoError(t, err) // fetch protocol state - actual, err := store.ByID(protocolStateID) + actual, err := s.ByID(protocolStateID) require.NoError(t, err) require.Equal(t, stateEntry, actual) @@ -142,40 +146,40 @@ func TestProtocolStateMergeParticipants(t *testing.T) { // TestProtocolStateRootSnapshot tests that storing and retrieving root protocol state (in case of bootstrap) works as expected. // Specifically, this means that no prior epoch exists (situation after a spork) from the perspective of the freshly-sporked network. 
func TestProtocolStateRootSnapshot(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() setups := NewEpochSetups(metrics, db) commits := NewEpochCommits(metrics, db) - store := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) expected := unittest.RootEpochProtocolStateFixture() protocolStateID := expected.ID() blockID := unittest.IdentifierFixture() // store protocol state and auxiliary info - err := transaction.Update(db, func(tx *transaction.Tx) error { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // store epoch events to be able to retrieve them later - err := setups.StoreTx(expected.CurrentEpochSetup)(tx) + err := setups.BatchStore(rw, expected.CurrentEpochSetup) require.NoError(t, err) - err = commits.StoreTx(expected.CurrentEpochCommit)(tx) + err = commits.BatchStore(rw, expected.CurrentEpochCommit) require.NoError(t, err) - err = store.StoreTx(protocolStateID, expected.MinEpochStateEntry)(tx) + err = s.BatchStore(rw.Writer(), protocolStateID, expected.MinEpochStateEntry) require.NoError(t, err) - return store.Index(blockID, protocolStateID)(tx) + return s.BatchIndex(rw, blockID, protocolStateID) }) require.NoError(t, err) // fetch protocol state - actual, err := store.ByID(protocolStateID) + actual, err := s.ByID(protocolStateID) require.NoError(t, err) require.Equal(t, expected, actual) assertRichProtocolStateValidity(t, actual) // fetch protocol state by block ID - actualByBlockID, err := store.ByBlockID(blockID) + actualByBlockID, err := s.ByBlockID(blockID) require.NoError(t, err) require.Equal(t, expected, actualByBlockID) diff --git a/storage/store/epoch_setups.go b/storage/store/epoch_setups.go new file mode 100644 index 00000000000..56048c58d43 --- /dev/null +++ b/storage/store/epoch_setups.go @@ -0,0 +1,60 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type EpochSetups struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.EpochSetup] +} + +// NewEpochSetups instantiates a new EpochSetups storage. +func NewEpochSetups(collector module.CacheMetrics, db storage.DB) *EpochSetups { + + store := func(rw storage.ReaderBatchWriter, id flow.Identifier, setup *flow.EpochSetup) error { + return operation.InsertEpochSetup(rw.Writer(), id, setup) + } + + retrieve := func(r storage.Reader, id flow.Identifier) (*flow.EpochSetup, error) { + var setup flow.EpochSetup + err := operation.RetrieveEpochSetup(r, id, &setup) + return &setup, err + } + + es := &EpochSetups{ + db: db, + cache: newCache[flow.Identifier, *flow.EpochSetup](collector, metrics.ResourceEpochSetup, + withLimit[flow.Identifier, *flow.EpochSetup](4*flow.DefaultTransactionExpiry), + withStore(store), + withRetrieve(retrieve)), + } + + return es +} + +// No errors are expected during normal operation. 
+func (es *EpochSetups) BatchStore(rw storage.ReaderBatchWriter, setup *flow.EpochSetup) error { + return es.cache.PutTx(rw, setup.ID(), setup) +} + +func (es *EpochSetups) retrieveTx(setupID flow.Identifier) (*flow.EpochSetup, error) { + val, err := es.cache.Get(es.db.Reader(), setupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve EpochSetup event with id %x: %w", setupID, err) + } + return val, nil +} + +// ByID will return the EpochSetup event by its ID. +// Error returns: +// * storage.ErrNotFound if no EpochSetup with the ID exists +func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error) { + return es.retrieveTx(setupID) +} diff --git a/storage/store/epoch_setups_test.go b/storage/store/epoch_setups_test.go new file mode 100644 index 00000000000..d6f1548e6ea --- /dev/null +++ b/storage/store/epoch_setups_test.go @@ -0,0 +1,45 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" +) + +// TestEpochSetupStoreAndRetrieve tests that a setup can be sd, retrieved and attempted to be stored again without an error +func TestEpochSetupStoreAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + s := store.NewEpochSetups(metrics, db) + + // attempt to get a setup that doesn't exist + _, err := s.ByID(unittest.IdentifierFixture()) + assert.ErrorIs(t, err, storage.ErrNotFound) + + // store a setup in db + expected := unittest.EpochSetupFixture() + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(rw, expected) + }) + require.NoError(t, err) + + // retrieve the setup by ID + actual, err := s.ByID(expected.ID()) + require.NoError(t, err) + assert.Equal(t, expected, actual) + + // test storing same epoch setup + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(rw, expected) + }) + require.NoError(t, err) + }) +} diff --git a/storage/store/guarantees.go b/storage/store/guarantees.go new file mode 100644 index 00000000000..0887f86572a --- /dev/null +++ b/storage/store/guarantees.go @@ -0,0 +1,56 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// Guarantees implements persistent storage for collection guarantees. 
+type Guarantees struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.CollectionGuarantee] +} + +func NewGuarantees(collector module.CacheMetrics, db storage.DB, cacheSize uint) *Guarantees { + + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, collID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + return operation.UnsafeInsertGuarantee(lctx, rw.Writer(), collID, guarantee) + } + + retrieve := func(r storage.Reader, collID flow.Identifier) (*flow.CollectionGuarantee, error) { + var guarantee flow.CollectionGuarantee + err := operation.RetrieveGuarantee(r, collID, &guarantee) + return &guarantee, err + } + + g := &Guarantees{ + db: db, + cache: newCache(collector, metrics.ResourceGuarantee, + withLimit[flow.Identifier, *flow.CollectionGuarantee](cacheSize), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve)), + } + + return g +} + +func (g *Guarantees) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, guarantee *flow.CollectionGuarantee) error { + return g.cache.PutWithLockTx(lctx, rw, guarantee.ID(), guarantee) +} + +func (g *Guarantees) retrieveTx(collID flow.Identifier) (*flow.CollectionGuarantee, error) { + val, err := g.cache.Get(g.db.Reader(), collID) + if err != nil { + return nil, err + } + return val, nil +} + +func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) { + return g.retrieveTx(collID) +} diff --git a/storage/store/guarantees_test.go b/storage/store/guarantees_test.go new file mode 100644 index 00000000000..d27684ab8f4 --- /dev/null +++ b/storage/store/guarantees_test.go @@ -0,0 +1,66 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestGuaranteeStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all := store.InitAll(metrics, db) + blocks := all.Blocks + guarantees := all.Guarantees + + s := store.NewGuarantees(metrics, db, 1000) + + // make block with a collection guarantee: + expected := unittest.CollectionGuaranteeFixture() + block := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected}) + + // attempt to retrieve (still) unknown guarantee + _, err := s.ByCollectionID(expected.ID()) + require.ErrorIs(t, err, storage.ErrNotFound) + + // store guarantee + unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, block) + }) + }) + + // retrieve the guarantee by the ID of the collection + actual, err := guarantees.ByCollectionID(expected.ID()) + require.NoError(t, err) + require.Equal(t, expected, actual) + + // repeated storage of the same block should return + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertBlock)) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx2, rw, block) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + lctx2.Release() + + // OK to store a different block + expected2 := 
unittest.CollectionGuaranteeFixture() + block2 := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected2}) + lctx3 := lockManager.NewContext() + require.NoError(t, lctx3.AcquireLock(storage.LockInsertBlock)) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx3, rw, block2) + })) + lctx3.Release() + }) +} diff --git a/storage/store/headers.go b/storage/store/headers.go new file mode 100644 index 00000000000..db80564f1c3 --- /dev/null +++ b/storage/store/headers.go @@ -0,0 +1,212 @@ +package store + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/procedure" +) + +// Headers implements a simple read-only header storage around a DB. +type Headers struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.Header] + heightCache *Cache[uint64, flow.Identifier] + viewCache *Cache[uint64, flow.Identifier] +} + +var _ storage.Headers = (*Headers)(nil) + +func NewHeaders(collector module.CacheMetrics, db storage.DB) *Headers { + + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, header *flow.Header) error { + return operation.InsertHeader(lctx, rw, blockID, header) + } + + retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Header, error) { + var header flow.Header + err := operation.RetrieveHeader(r, blockID, &header) + return &header, err + } + + retrieveHeight := func(r storage.Reader, height uint64) (flow.Identifier, error) { + var id flow.Identifier + err := operation.LookupBlockHeight(r, height, &id) + return id, err + } + + retrieveView := func(r storage.Reader, view uint64) (flow.Identifier, error) { + var id flow.Identifier + err := operation.LookupCertifiedBlockByView(r, view, &id) + return id, err + } + + h := &Headers{ + db: db, + cache: newCache(collector, metrics.ResourceHeader, + withLimit[flow.Identifier, *flow.Header](4*flow.DefaultTransactionExpiry), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve)), + + heightCache: newCache(collector, metrics.ResourceFinalizedHeight, + withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry), + withRetrieve(retrieveHeight)), + + viewCache: newCache(collector, metrics.ResourceCertifiedView, + withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry), + withRetrieve(retrieveView)), + } + + return h +} + +func (h *Headers) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, header *flow.Header) error { + return h.cache.PutWithLockTx(lctx, rw, header.ID(), header) +} + +func (h *Headers) retrieveTx(blockID flow.Identifier) (*flow.Header, error) { + val, err := h.cache.Get(h.db.Reader(), blockID) + if err != nil { + return nil, err + } + return val, nil +} + +// results in `storage.ErrNotFound` for unknown height +func (h *Headers) retrieveIdByHeightTx(height uint64) (flow.Identifier, error) { + blockID, err := h.heightCache.Get(h.db.Reader(), height) + if err != nil { + return flow.ZeroID, fmt.Errorf("failed to retrieve block ID for height %d: %w", height, err) + } + return blockID, nil +} + +func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { + return h.retrieveTx(blockID) +} + +func (h *Headers) ByHeight(height uint64) (*flow.Header, error) { + blockID, err := 
h.retrieveIdByHeightTx(height) + if err != nil { + return nil, err + } + return h.retrieveTx(blockID) +} + +// ByView returns block header for the given view. It is only available for certified blocks. +// Certified blocks are the blocks that have received a QC. Hotstuff guarantees that for each view, +// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique +// even for non-finalized blocks. +// Expected errors during normal operations: +// - `storage.ErrNotFound` if no certified block is known at given view. +// +// NOTE: this method is not available until next spork (mainnet27) or a migration that builds the index. +func (h *Headers) ByView(view uint64) (*flow.Header, error) { + blockID, err := h.viewCache.Get(h.db.Reader(), view) + if err != nil { + return nil, err + } + return h.retrieveTx(blockID) +} + +// Exists returns true if a header with the given ID has been stored. +// No errors are expected during normal operation. +func (h *Headers) Exists(blockID flow.Identifier) (bool, error) { + // if the block is in the cache, return true + if ok := h.cache.IsCached(blockID); ok { + return ok, nil + } + // otherwise, check badger store + exists, err := operation.BlockExists(h.db.Reader(), blockID) + if err != nil { + return false, fmt.Errorf("could not check existence: %w", err) + } + return exists, nil +} + +// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized +// version of `ByHeight` that skips retrieving the block. Expected errors during normal operations: +// - `storage.ErrNotFound` if no finalized block is known at given height. +func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { + blockID, err := h.retrieveIdByHeightTx(height) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup block id by height %d: %w", height, err) + } + return blockID, nil +} + +func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { + var blockIDs flow.IdentifierList + err := procedure.LookupBlockChildren(h.db.Reader(), parentID, &blockIDs) + if err != nil { + return nil, fmt.Errorf("could not look up children: %w", err) + } + headers := make([]*flow.Header, 0, len(blockIDs)) + for _, blockID := range blockIDs { + header, err := h.ByBlockID(blockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve child (%x): %w", blockID, err) + } + headers = append(headers, header) + } + return headers, nil +} + +// BlockIDByView returns the block ID that is certified at the given view. It is an optimized +// version of `ByView` that skips retrieving the block. Expected errors during normal operations: +// - `storage.ErrNotFound` if no certified block is known at given view. +// +// NOTE: this method is not available until next spork (mainnet27) or a migration that builds the index. +func (h *Headers) BlockIDByView(view uint64) (flow.Identifier, error) { + blockID, err := h.viewCache.Get(h.db.Reader(), view) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup block id by view %d: %w", view, err) + } + return blockID, nil +} + +func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error) { + blocks := make([]flow.Header, 0, 1) + err := operation.FindHeaders(h.db.Reader(), filter, &blocks) + return blocks, err +} + +// RollbackExecutedBlock update the executed block header to the given header. +// Intended to be used by Execution Nodes only, to roll back executed block height. 
+// This method is NOT CONCURRENT SAFE, the caller should make sure to call +// this method in a single thread. +func (h *Headers) RollbackExecutedBlock(header *flow.Header) error { + var blockID flow.Identifier + err := operation.RetrieveExecutedBlock(h.db.Reader(), &blockID) + if err != nil { + return fmt.Errorf("cannot lookup executed block: %w", err) + } + + var highest flow.Header + err = operation.RetrieveHeader(h.db.Reader(), blockID, &highest) + if err != nil { + return fmt.Errorf("cannot retrieve executed header: %w", err) + } + + // only rollback if the given height is below the current executed height + if header.Height >= highest.Height { + return fmt.Errorf("cannot roolback. expect the target height %v to be lower than highest executed height %v, but actually is not", + header.Height, highest.Height, + ) + } + + return h.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err = operation.UpdateExecutedBlock(rw.Writer(), header.ID()) + if err != nil { + return fmt.Errorf("cannot update highest executed block: %w", err) + } + + return nil + }) +} diff --git a/storage/store/headers_test.go b/storage/store/headers_test.go new file mode 100644 index 00000000000..81287b642d0 --- /dev/null +++ b/storage/store/headers_test.go @@ -0,0 +1,65 @@ +package store_test + +import ( + "testing" + + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestHeaderStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all := store.InitAll(metrics, db) + headers := all.Headers + blocks := all.Blocks + + block := unittest.BlockFixture() + + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + // store block which will also store header + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, &block) + }) + require.NoError(t, err) + + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockFinalizeBlock)) + // index the header + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx2, rw, block.Header.Height, block.ID()) + }) + lctx2.Release() + require.NoError(t, err) + + // retrieve header by height + actual, err := headers.ByHeight(block.Header.Height) + require.NoError(t, err) + require.Equal(t, block.Header, actual) + }) +} + +func TestHeaderRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + headers := store.NewHeaders(metrics, db) + + header := unittest.BlockHeaderFixture() + + // retrieve header by height, should err as not store before height + _, err := headers.ByHeight(header.Height) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/index.go b/storage/store/index.go new file mode 100644 index 00000000000..901d7329e06 --- /dev/null +++ b/storage/store/index.go @@ -0,0 +1,51 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + 
"github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/procedure" +) + +// Index implements a simple read-only payload storage around a badger DB. +type Index struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.Index] +} + +func NewIndex(collector module.CacheMetrics, db storage.DB) *Index { + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + return procedure.InsertIndex(lctx, rw, blockID, index) + } + + retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Index, error) { + var index flow.Index + err := procedure.RetrieveIndex(r, blockID, &index) + return &index, err + } + + p := &Index{ + db: db, + cache: newCache(collector, metrics.ResourceIndex, + withLimit[flow.Identifier, *flow.Index](flow.DefaultTransactionExpiry+100), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve)), + } + + return p +} + +func (i *Index) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + return i.cache.PutWithLockTx(lctx, rw, blockID, index) +} + +func (i *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { + val, err := i.cache.Get(i.db.Reader(), blockID) + if err != nil { + return nil, err + } + return val, nil +} diff --git a/storage/store/init.go b/storage/store/init.go new file mode 100644 index 00000000000..4ad6ea49b50 --- /dev/null +++ b/storage/store/init.go @@ -0,0 +1,72 @@ +package store + +import ( + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/storage" +) + +type All struct { + Headers *Headers + Guarantees *Guarantees + Seals *Seals + Index *Index + Payloads *Payloads + Blocks *Blocks + QuorumCertificates *QuorumCertificates + Results *ExecutionResults + Receipts *ExecutionReceipts + Commits *Commits + + EpochSetups *EpochSetups + EpochCommits *EpochCommits + EpochProtocolStateEntries *EpochProtocolStateEntries + ProtocolKVStore *ProtocolKVStore + VersionBeacons *VersionBeacons + + Transactions *Transactions + Collections *Collections +} + +func InitAll(metrics module.CacheMetrics, db storage.DB) *All { + headers := NewHeaders(metrics, db) + guarantees := NewGuarantees(metrics, db, DefaultCacheSize) + seals := NewSeals(metrics, db) + index := NewIndex(metrics, db) + results := NewExecutionResults(metrics, db) + receipts := NewExecutionReceipts(metrics, db, results, DefaultCacheSize) + payloads := NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := NewBlocks(db, headers, payloads) + qcs := NewQuorumCertificates(metrics, db, DefaultCacheSize) + commits := NewCommits(metrics, db) + + setups := NewEpochSetups(metrics, db) + epochCommits := NewEpochCommits(metrics, db) + epochProtocolStateEntries := NewEpochProtocolStateEntries(metrics, setups, epochCommits, db, + DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + protocolKVStore := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := NewVersionBeacons(db) + + transactions := NewTransactions(metrics, db) + collections := NewCollections(db, transactions) + + return &All{ + Headers: headers, + Guarantees: guarantees, + Seals: seals, + Index: index, + Payloads: payloads, + Blocks: blocks, + QuorumCertificates: qcs, + Results: results, + Receipts: receipts, + Commits: commits, + EpochCommits: epochCommits, + EpochSetups: setups, + EpochProtocolStateEntries: 
epochProtocolStateEntries, + ProtocolKVStore: protocolKVStore, + VersionBeacons: versionBeacons, + + Transactions: transactions, + Collections: collections, + } +} diff --git a/storage/store/inmemory/unsynchronized/collections.go b/storage/store/inmemory/unsynchronized/collections.go index bf76a1bde43..00c9b311782 100644 --- a/storage/store/inmemory/unsynchronized/collections.go +++ b/storage/store/inmemory/unsynchronized/collections.go @@ -1,8 +1,11 @@ package unsynchronized import ( + "fmt" "sync" + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" ) @@ -16,15 +19,18 @@ type Collections struct { collections map[flow.Identifier]*flow.Collection lightCollections map[flow.Identifier]*flow.LightCollection transactionIDToLightCollection map[flow.Identifier]*flow.LightCollection + + transactions *Transactions // Reference to Transactions to store txs when storing collections } var _ storage.Collections = (*Collections)(nil) -func NewCollections() *Collections { +func NewCollections(transactions *Transactions) *Collections { return &Collections{ collections: make(map[flow.Identifier]*flow.Collection), lightCollections: make(map[flow.Identifier]*flow.LightCollection), transactionIDToLightCollection: make(map[flow.Identifier]*flow.LightCollection), + transactions: transactions, } } @@ -78,38 +84,45 @@ func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCol // Store inserts the collection keyed by ID and all constituent transactions. // No errors are expected during normal operation. -func (c *Collections) Store(collection *flow.Collection) error { +func (c *Collections) Store(collection *flow.Collection) (flow.LightCollection, error) { c.lock.Lock() defer c.lock.Unlock() c.collections[collection.ID()] = collection - return nil + light := collection.Light() + return light, nil } -// StoreLightAndIndexByTransaction inserts the light collection (only +// StoreAndIndexByTransaction inserts the light collection (only // transaction IDs) and adds a transaction id index for each of the // transactions within the collection (transaction_id->collection_id). // -// NOTE: Currently it is possible in rare circumstances for two collections -// to be guaranteed which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/3556). -// The second of these will revert upon reaching the execution node, so -// this doesn't impact the execution state, but it can result in the Access -// node processing two collections which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/5337). -// To handle this, we skip indexing the affected transaction when inserting -// the transaction_id->collection_id index when an index for the transaction -// already exists. +// CAUTION: current approach is NOT BFT and needs to be revised in the future. +// Honest clusters ensure a transaction can only belong to one collection. However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// TODO: eventually we need to handle Byzantine clusters // // No errors are expected during normal operation. 
-func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { +func (c *Collections) StoreAndIndexByTransaction(_ lockctx.Proof, collection *flow.Collection) (flow.LightCollection, error) { c.lock.Lock() defer c.lock.Unlock() - c.lightCollections[collection.ID()] = collection - for _, txID := range collection.Transactions { - c.transactionIDToLightCollection[txID] = collection + c.collections[collection.ID()] = collection + light := collection.Light() + c.lightCollections[light.ID()] = &light + for _, txID := range light.Transactions { + c.transactionIDToLightCollection[txID] = &light } - return nil + for _, tx := range collection.Transactions { + if err := c.transactions.Store(tx); err != nil { + return flow.LightCollection{}, fmt.Errorf("could not index transaction: %w", err) + } + } + + return light, nil } // Remove removes the collection and all constituent transactions. @@ -152,3 +165,17 @@ func (c *Collections) LightCollections() []flow.LightCollection { } return out } + +// BatchStoreAndIndexByTransaction stores a light collection and indexes it by transaction ID within a batch operation. +// +// CAUTION: current approach is NOT BFT and needs to be revised in the future. +// Honest clusters ensure a transaction can only belong to one collection. However, in rare +// cases, the collector clusters can exceed byzantine thresholds -- making it possible to +// produce multiple finalized collections (aka guaranteed collections) containing the same +// transaction repeatedly. +// TODO: eventually we need to handle Byzantine clusters +// +// This method is not implemented and will always return an error. +func (c *Collections) BatchStoreAndIndexByTransaction(_ lockctx.Proof, _ *flow.Collection, _ storage.ReaderBatchWriter) (flow.LightCollection, error) { + return flow.LightCollection{}, fmt.Errorf("not implemented") +} diff --git a/storage/store/inmemory/unsynchronized/collections_test.go b/storage/store/inmemory/unsynchronized/collections_test.go index c77ade0a110..de12e324263 100644 --- a/storage/store/inmemory/unsynchronized/collections_test.go +++ b/storage/store/inmemory/unsynchronized/collections_test.go @@ -5,18 +5,18 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" ) func TestCollection_HappyCase(t *testing.T) { - collections := NewCollections() + transactions := NewTransactions() + collections := NewCollections(transactions) collection := unittest.CollectionFixture(3) // Store collection - err := collections.Store(&collection) + _, err := collections.Store(&collection) require.NoError(t, err) // Retrieve collection @@ -39,24 +39,25 @@ func TestCollection_HappyCase(t *testing.T) { } func TestLightByTransactionID_HappyCase(t *testing.T) { - collections := NewCollections() - lightCollection := &flow.LightCollection{ - Transactions: []flow.Identifier{unittest.IdentifierFixture(), unittest.IdentifierFixture()}, - } + transactions := NewTransactions() + collections := NewCollections(transactions) + collection := unittest.CollectionFixture(2) - err := collections.StoreLightAndIndexByTransaction(lightCollection) + // Create a no-op lock context for testing + _, err := collections.StoreAndIndexByTransaction(nil, &collection) require.NoError(t, err) // Fetch by transaction ID and validate - retrieved, err := collections.LightByTransactionID(lightCollection.Transactions[0]) + retrieved, err := 
collections.LightByTransactionID(collection.Transactions[0].ID()) require.NoError(t, err) - require.Equal(t, lightCollection, retrieved) + lightCollection := collection.Light() + require.Equal(t, &lightCollection, retrieved) - retrieved, err = collections.LightByTransactionID(lightCollection.Transactions[1]) + retrieved, err = collections.LightByTransactionID(collection.Transactions[1].ID()) require.NoError(t, err) - require.Equal(t, lightCollection, retrieved) + require.Equal(t, &lightCollection, retrieved) extracted := collections.LightCollections() require.Len(t, extracted, 1) - require.Equal(t, *lightCollection, extracted[0]) + require.Equal(t, lightCollection, extracted[0]) } diff --git a/storage/store/inmemory/unsynchronized/registers.go b/storage/store/inmemory/unsynchronized/registers.go index 543573670be..7b42ba6e199 100644 --- a/storage/store/inmemory/unsynchronized/registers.go +++ b/storage/store/inmemory/unsynchronized/registers.go @@ -1,6 +1,7 @@ package unsynchronized import ( + "fmt" "sync" "github.com/onflow/flow-go/model/flow" @@ -37,11 +38,12 @@ func (r *Registers) Get(registerID flow.RegisterID, height uint64) (flow.Registe return flow.RegisterValue{}, storage.ErrHeightNotIndexed } - if reg, ok := r.store[registerID]; ok { - return reg, nil + reg, ok := r.store[registerID] + if !ok { + return flow.RegisterValue{}, storage.ErrNotFound } - return flow.RegisterValue{}, storage.ErrNotFound + return reg, nil } // LatestHeight returns the latest indexed height. @@ -55,15 +57,13 @@ func (r *Registers) FirstHeight() uint64 { } // Store stores a batch of register entries at the storage's block height. -// -// Expected errors: -// - storage.ErrHeightNotIndexed if the given height does not match the storage's block height. +// No errors are expected during normal operation. func (r *Registers) Store(registers flow.RegisterEntries, height uint64) error { r.lock.Lock() defer r.lock.Unlock() if r.blockHeight != height { - return storage.ErrHeightNotIndexed + return fmt.Errorf("failed to store registers: height mismatch: expected %d, got %d", r.blockHeight, height) } for _, reg := range registers { @@ -73,13 +73,19 @@ func (r *Registers) Store(registers flow.RegisterEntries, height uint64) error { return nil } -func (r *Registers) Data() []flow.RegisterEntry { +// Data returns all register entries for the specified height. +// No errors are expected during normal operation. 
+func (r *Registers) Data(height uint64) ([]flow.RegisterEntry, error) { r.lock.RLock() defer r.lock.RUnlock() + if r.blockHeight != height { + return nil, fmt.Errorf("failed to get registers: height mismatch: expected %d, got %d", r.blockHeight, height) + } + out := make([]flow.RegisterEntry, 0, len(r.store)) for k, v := range r.store { out = append(out, flow.RegisterEntry{Key: k, Value: v}) } - return out + return out, nil } diff --git a/storage/store/inmemory/unsynchronized/registers_test.go b/storage/store/inmemory/unsynchronized/registers_test.go index b7a6ae8d9b0..68ef4d09816 100644 --- a/storage/store/inmemory/unsynchronized/registers_test.go +++ b/storage/store/inmemory/unsynchronized/registers_test.go @@ -3,6 +3,7 @@ package unsynchronized import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -45,14 +46,16 @@ func TestRegisters_HappyPath(t *testing.T) { // Try storing at the wrong height err = registers.Store(entries, height+1) - require.ErrorIs(t, err, storage.ErrHeightNotIndexed) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to store registers: height mismatch:") // Try getting a non-existent key _, err = registers.Get(unittest.RegisterIDFixture(), height) require.ErrorIs(t, err, storage.ErrNotFound) // Extract registers - data := registers.Data() + data, err := registers.Data(height) + require.NoError(t, err) require.Len(t, data, len(entries)) require.ElementsMatch(t, entries, data) } diff --git a/storage/store/inmemory/unsynchronized/transaction_result_error_messages.go b/storage/store/inmemory/unsynchronized/transaction_result_error_messages.go index b669296764d..a3fae2f7b3d 100644 --- a/storage/store/inmemory/unsynchronized/transaction_result_error_messages.go +++ b/storage/store/inmemory/unsynchronized/transaction_result_error_messages.go @@ -2,6 +2,7 @@ package unsynchronized import ( "errors" + "fmt" "sync" "github.com/onflow/flow-go/model/flow" @@ -135,3 +136,9 @@ func (t *TransactionResultErrorMessages) Data() []flow.TransactionResultErrorMes } return out } + +// BatchStore inserts a batch of transaction result error messages into a batch +// This method is not implemented and will always return an error. +func (t *TransactionResultErrorMessages) BatchStore(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage, batch storage.ReaderBatchWriter) error { + return fmt.Errorf("not implemented") +} diff --git a/storage/store/inmemory/unsynchronized/transactions.go b/storage/store/inmemory/unsynchronized/transactions.go index d8c9fe703c6..9fc3777c17a 100644 --- a/storage/store/inmemory/unsynchronized/transactions.go +++ b/storage/store/inmemory/unsynchronized/transactions.go @@ -1,6 +1,7 @@ package unsynchronized import ( + "fmt" "sync" "github.com/onflow/flow-go/model/flow" @@ -64,3 +65,9 @@ func (t *Transactions) Data() []flow.TransactionBody { } return out } + +// BatchStore stores transaction within a batch operation. +// This method is not implemented and will always return an error. 
+func (t *Transactions) BatchStore(_ *flow.TransactionBody, _ storage.ReaderBatchWriter) error { + return fmt.Errorf("not implemented") +} diff --git a/storage/store/latest_persisted_sealed_result.go b/storage/store/latest_persisted_sealed_result.go new file mode 100644 index 00000000000..bcb8957ef63 --- /dev/null +++ b/storage/store/latest_persisted_sealed_result.go @@ -0,0 +1,106 @@ +package store + +import ( + "fmt" + "sync" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var _ storage.LatestPersistedSealedResult = (*LatestPersistedSealedResult)(nil) + +// LatestPersistedSealedResult tracks the most recently persisted sealed execution result processed +// by the Access ingestion engine. +type LatestPersistedSealedResult struct { + // resultID is the execution result ID of the most recently persisted sealed result. + resultID flow.Identifier + + // height is the height of the most recently persisted sealed result's block. + // This is the value stored in the consumer progress index. + height uint64 + + // progress is the consumer progress instance + progress storage.ConsumerProgress + + // batchMu is used to prevent concurrent batch updates to the persisted height. + // the critical section is fairly large, so use a separate mutex from the cached values. + batchMu sync.Mutex + + // cacheMu is used to protect access to resultID and height. + cacheMu sync.RWMutex +} + +// NewLatestPersistedSealedResult creates a new LatestPersistedSealedResult instance. +// +// No errors are expected during normal operation, +func NewLatestPersistedSealedResult( + progress storage.ConsumerProgress, + headers storage.Headers, + results storage.ExecutionResults, +) (*LatestPersistedSealedResult, error) { + // load the height and resultID of the latest persisted sealed result + height, err := progress.ProcessedIndex() + if err != nil { + return nil, fmt.Errorf("could not get processed index: %w", err) + } + + header, err := headers.ByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get header: %w", err) + } + + // Note: the result-to-block relationship is indexed by the Access ingestion engine when a + // result is sealed. + result, err := results.ByBlockID(header.ID()) + if err != nil { + return nil, fmt.Errorf("could not get result: %w", err) + } + + return &LatestPersistedSealedResult{ + resultID: result.ID(), + height: height, + progress: progress, + }, nil +} + +// Latest returns the ID and height of the latest persisted sealed result. +func (l *LatestPersistedSealedResult) Latest() (flow.Identifier, uint64) { + l.cacheMu.RLock() + defer l.cacheMu.RUnlock() + return l.resultID, l.height +} + +// BatchSet updates the latest persisted sealed result in a batch operation +// The resultID and height are added to the provided batch, and the local data is updated only after +// the batch is successfully committed. +// +// No errors are expected during normal operation, +func (l *LatestPersistedSealedResult) BatchSet(resultID flow.Identifier, height uint64, batch storage.ReaderBatchWriter) error { + // there are 2 mutexes used here: + // - batchMu is used to prevent concurrent batch updates to the persisted height. Since this + // is a global variable, we need to ensure that only a single batch is in progress at a time. + // - cacheMu is used to protect access to the cached resultID and height values. This is an + // optimization to avoid readers having to block during the batch operations, since they + // can have arbitrarily long setup times. 
+ l.batchMu.Lock() + + batch.AddCallback(func(err error) { + defer l.batchMu.Unlock() + if err != nil { + return + } + + l.cacheMu.Lock() + defer l.cacheMu.Unlock() + + l.resultID = resultID + l.height = height + }) + + if err := l.progress.BatchSetProcessedIndex(height, batch); err != nil { + return fmt.Errorf("could not add processed index update to batch: %w", err) + } + + return nil +} diff --git a/storage/store/latest_persisted_sealed_result_test.go b/storage/store/latest_persisted_sealed_result_test.go new file mode 100644 index 00000000000..3a1f35ca82a --- /dev/null +++ b/storage/store/latest_persisted_sealed_result_test.go @@ -0,0 +1,259 @@ +package store + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewLatestPersistedSealedResult tests the initialization of LatestPersistedSealedResult. +// It verifies that: +// - The ConsumerProgress is properly stored +// - All fields are correctly initialized on success +func TestNewLatestPersistedSealedResult(t *testing.T) { + initialHeight := uint64(100) + missingHeaderHeight := initialHeight + 1 + missingResultHeight := initialHeight + 2 + + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, initialHeight) + + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + t.Run("successful initialization", func(t *testing.T) { + initializer := NewConsumerProgress(db, "test_consumer1") + progress, err := initializer.Initialize(initialHeight) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + require.NotNil(t, latest) + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }) + + t.Run("processed index error", func(t *testing.T) { + expectedErr := fmt.Errorf("processed index error") + + mockCP := storagemock.NewConsumerProgress(t) + mockCP.On("ProcessedIndex").Return(uint64(0), expectedErr) + + latest, err := NewLatestPersistedSealedResult(mockCP, nil, nil) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + + t.Run("header lookup error", func(t *testing.T) { + expectedErr := fmt.Errorf("header lookup error") + + initializer := NewConsumerProgress(db, "test_consumer2") + progress, err := initializer.Initialize(missingHeaderHeight) + require.NoError(t, err) + + mockHeaders.On("ByHeight", missingHeaderHeight).Return(nil, expectedErr) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, nil) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + + t.Run("result lookup error", func(t *testing.T) { + expectedErr := fmt.Errorf("result lookup error") + + initializer := NewConsumerProgress(db, "test_consumer3") + progress, err := initializer.Initialize(missingResultHeight) + require.NoError(t, err) + + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(missingResultHeight)) + + mockHeaders.On("ByHeight", missingResultHeight).Return(header, nil) + mockResults.On("ByBlockID", header.ID()).Return(nil, expectedErr) + + latest, err := NewLatestPersistedSealedResult(progress, 
mockHeaders, mockResults) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + }) + +} + +// TestLatestPersistedSealedResult_BatchSet tests the batch update functionality. +// It verifies that: +// - Updates are atomic - either all state is updated or none +// - The callback mechanism works correctly for both success and failure cases +// - State is not updated if BatchSetProcessedIndex fails +// - State is only updated after the batch callback indicates success +func TestLatestPersistedSealedResult_BatchSet(t *testing.T) { + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, 100) + + newResultID := unittest.IdentifierFixture() + newHeight := uint64(200) + + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + t.Run("successful batch update", func(t *testing.T) { + initializer := NewConsumerProgress(db, "test_consumer1") + progress, err := initializer.Initialize(initialHeader.Height) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + batch := db.NewBatch() + defer batch.Close() + + done := make(chan struct{}) + batch.AddCallback(func(err error) { + require.NoError(t, err) + close(done) + }) + + err = latest.BatchSet(newResultID, newHeight, batch) + require.NoError(t, err) + + err = batch.Commit() + require.NoError(t, err) + + unittest.RequireCloseBefore(t, done, 100*time.Millisecond, "callback not called") + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, newResultID, actualResultID) + assert.Equal(t, newHeight, actualHeight) + }) + }) + + t.Run("batch update error during BatchSetProcessedIndex", func(t *testing.T) { + expectedErr := fmt.Errorf("could not set processed index") + + var callbackCalled sync.WaitGroup + callbackCalled.Add(1) + + mockBatch := storagemock.NewReaderBatchWriter(t) + mockBatch.On("AddCallback", mock.AnythingOfType("func(error)")).Run(func(args mock.Arguments) { + callback := args.Get(0).(func(error)) + callback(expectedErr) + callbackCalled.Done() + }) + + mockCP := storagemock.NewConsumerProgress(t) + mockCP.On("ProcessedIndex").Return(initialHeader.Height, nil) + mockCP.On("BatchSetProcessedIndex", newHeight, mockBatch).Return(expectedErr) + + latest, err := NewLatestPersistedSealedResult(mockCP, mockHeaders, mockResults) + require.NoError(t, err) + + err = latest.BatchSet(newResultID, newHeight, mockBatch) + assert.ErrorIs(t, err, expectedErr) + + callbackCalled.Wait() + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }) +} + +// TestLatestPersistedSealedResult_ConcurrentAccess tests the thread safety of the implementation. 
+// It verifies that: +// - Multiple concurrent reads are safe +// - Concurrent reads and writes are properly synchronized +// - No data races occur under heavy concurrent load +// - The state remains consistent during concurrent operations +func TestLatestPersistedSealedResult_ConcurrentAccess(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, 100) + + initializer := NewConsumerProgress(db, "test_consumer") + progress, err := initializer.Initialize(initialHeader.Height) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + t.Run("concurrent reads", func(t *testing.T) { + var wg sync.WaitGroup + numGoroutines := 1000 + + for range numGoroutines { + wg.Add(1) + go func() { + defer wg.Done() + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }() + } + + wg.Wait() + }) + + t.Run("concurrent read/write", func(t *testing.T) { + var wg sync.WaitGroup + numGoroutines := 1000 + + for i := 0; i < numGoroutines; i++ { + wg.Add(2) + go func(i int) { + defer wg.Done() + + batch := db.NewBatch() + defer batch.Close() + + newResultID := unittest.IdentifierFixture() + newHeight := uint64(200 + i) + err := latest.BatchSet(newResultID, newHeight, batch) + require.NoError(t, err) + + err = batch.Commit() + require.NoError(t, err) + }(i) + go func() { + defer wg.Done() + _, _ = latest.Latest() + }() + } + + wg.Wait() + }) + }) +} + +func getHeadersResults(t *testing.T, initialHeight uint64) (*flow.Header, *flow.ExecutionResult, *storagemock.Headers, *storagemock.ExecutionResults) { + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(initialHeight)) + result := unittest.ExecutionResultFixture(func(result *flow.ExecutionResult) { + result.BlockID = header.ID() + }) + + mockHeaders := storagemock.NewHeaders(t) + mockHeaders.On("ByHeight", initialHeight).Return(header, nil).Maybe() + + mockResults := storagemock.NewExecutionResults(t) + mockResults.On("ByBlockID", result.BlockID).Return(result, nil).Maybe() + + return header, result, mockHeaders, mockResults +} diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go index 71734d00720..54eb45333a0 100644 --- a/storage/store/my_receipts.go +++ b/storage/store/my_receipts.go @@ -3,10 +3,12 @@ package store import ( "errors" "fmt" - "sync" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" @@ -20,56 +22,11 @@ type MyExecutionReceipts struct { genericReceipts storage.ExecutionReceipts db storage.DB cache *Cache[flow.Identifier, *flow.ExecutionReceipt] - // preventing dirty reads when checking if a different my receipt has been - // indexed for the same block - indexingMyReceipt *sync.Mutex } // NewMyExecutionReceipts creates instance of MyExecutionReceipts which is a wrapper wrapper around badger.ExecutionReceipts // It's useful for execution nodes to keep track of produced execution receipts. 
func NewMyExecutionReceipts(collector module.CacheMetrics, db storage.DB, receipts storage.ExecutionReceipts) *MyExecutionReceipts { - indexingMyReceipt := new(sync.Mutex) - - store := func(rw storage.ReaderBatchWriter, blockID flow.Identifier, receipt *flow.ExecutionReceipt) error { - // the lock guarantees that no other thread can concurrently update the index. - // Note, we should not unlock the lock after this function returns, because the data is not yet persisted, the result - // of whether there was a different own receipt for the same block might be stale, therefore, we should not unlock - // the lock until the batch is committed. - - // the lock would not cause any deadlock, if - // 1) there is no other lock in the batch operation. - // 2) or there is other lock in the batch operation, but the locks are acquired and released in the same order. - rw.Lock(indexingMyReceipt) - - // assemble DB operations to store receipt (no execution) - err := receipts.BatchStore(receipt, rw) - if err != nil { - return err - } - - // assemble DB operations to index receipt as one of my own (no execution) - receiptID := receipt.ID() - - var savedReceiptID flow.Identifier - err = operation.LookupOwnExecutionReceipt(rw.GlobalReader(), blockID, &savedReceiptID) - if err == nil { - if savedReceiptID == receiptID { - // if we are storing same receipt we shouldn't error - return nil - } - - return fmt.Errorf("indexing my receipt %v failed: different receipt %v for the same block %v is already indexed", receiptID, - savedReceiptID, blockID) - } - - // exception - if !errors.Is(err, storage.ErrNotFound) { - return err - } - - return operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, receiptID) - } - retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.ExecutionReceipt, error) { var receiptID flow.Identifier err := operation.LookupOwnExecutionReceipt(r, blockID, &receiptID) @@ -84,7 +41,6 @@ func NewMyExecutionReceipts(collector module.CacheMetrics, db storage.DB, receip } remove := func(rw storage.ReaderBatchWriter, blockID flow.Identifier) error { - rw.Lock(indexingMyReceipt) return operation.RemoveOwnExecutionReceipt(rw.Writer(), blockID) } @@ -93,11 +49,9 @@ func NewMyExecutionReceipts(collector module.CacheMetrics, db storage.DB, receip db: db, cache: newCache(collector, metrics.ResourceMyReceipt, withLimit[flow.Identifier, *flow.ExecutionReceipt](flow.DefaultTransactionExpiry+100), - withStore(store), withRetrieve(retrieve), withRemove[flow.Identifier, *flow.ExecutionReceipt](remove), ), - indexingMyReceipt: indexingMyReceipt, } } @@ -111,8 +65,42 @@ func (m *MyExecutionReceipts) myReceipt(blockID flow.Identifier) (*flow.Executio // If entity fails marshalling, the error is wrapped in a generic error and returned. // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. // If a different my receipt has been indexed for the same block, the error is wrapped in a generic error and returned. 
-func (m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, rw storage.ReaderBatchWriter) error { - return m.cache.PutTx(rw, receipt.ExecutionResult.BlockID, receipt) +func (m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, rw storage.ReaderBatchWriter) error { + receiptID := receipt.ID() + blockID := receipt.ExecutionResult.BlockID + + if lctx == nil || !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("cannot store my receipt, missing lock %v", storage.LockInsertOwnReceipt) + } + + // add DB operation to batch for storing receipt (execution deferred until batch is committed) + err := m.genericReceipts.BatchStore(receipt, rw) + if err != nil { + return err + } + + // dd DB operation to batch for indexing receipt as one of my own (execution deferred until batch is committed) + var savedReceiptID flow.Identifier + err = operation.LookupOwnExecutionReceipt(rw.GlobalReader(), blockID, &savedReceiptID) + if err == nil { + if savedReceiptID == receiptID { + return nil // no-op we are storing *same* receipt + } + return fmt.Errorf("indexing my receipt %v failed: different receipt %v for the same block %v is already indexed", receiptID, savedReceiptID, blockID) + } + if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as this indicates that no receipt is indexed yet; anything else is an exception + return irrecoverable.NewException(err) + } + err = operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, receiptID) + if err != nil { + return err + } + + // TODO: ideally, adding the receipt to the cache on success, should be done by the cache itself + storage.OnCommitSucceed(rw, func() { + m.cache.Insert(blockID, receipt) + }) + return nil } // MyReceipt retrieves my receipt for the given block. 
diff --git a/storage/store/my_receipts_test.go b/storage/store/my_receipts_test.go index d1ef4d4857b..65e8f41a410 100644 --- a/storage/store/my_receipts_test.go +++ b/storage/store/my_receipts_test.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" @@ -15,63 +16,75 @@ import ( ) func TestMyExecutionReceiptsStorage(t *testing.T) { - withStore := func(t *testing.T, f func(storage.MyExecutionReceipts, storage.ExecutionResults, storage.ExecutionReceipts, storage.DB)) { + withStore := func(t *testing.T, f func(storage.MyExecutionReceipts, storage.ExecutionResults, storage.ExecutionReceipts, storage.DB, lockctx.Manager)) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() results := store.NewExecutionResults(metrics, db) receipts := store.NewExecutionReceipts(metrics, db, results, 100) myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) - f(myReceipts, results, receipts, db) + f(myReceipts, results, receipts, db, lockManager) }) } - t.Run("myReceipts one get one", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB) { + t.Run("myReceipts store and retrieve from different storage layers", func(t *testing.T) { + withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { block := unittest.BlockFixture() receipt1 := unittest.ReceiptForBlockFixture(&block) + // STEP 1: Store receipt + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) }) require.NoError(t, err) + defer lctx.Release() // While still holding the lock, retrieve values; this verifies that reads are not blocked by acquired locks + // STEP 2: Retrieve from different storage layers + // MyExecutionReceipts delegates the storage of the receipt to the more generic storage.ExecutionReceipts and storage.ExecutionResults, + // which is also used by the consensus follower to store execution receipts & results that are incorporated into blocks. + // After storing my receipts, we check that the result and receipt can also be retrieved from the lower-level generic storage layers. 
actual, err := myReceipts.MyReceipt(block.ID()) require.NoError(t, err) - require.Equal(t, receipt1, actual) - // Check after storing my receipts, the result and receipt are stored - actualReceipt, err := receipts.ByID(receipt1.ID()) + actualReceipt, err := receipts.ByID(receipt1.ID()) // generic receipts storage require.NoError(t, err) require.Equal(t, receipt1, actualReceipt) - actualResult, err := results.ByID(receipt1.ExecutionResult.ID()) + actualResult, err := results.ByID(receipt1.ExecutionResult.ID()) // generic results storage require.NoError(t, err) require.Equal(t, receipt1.ExecutionResult, *actualResult) }) }) - t.Run("myReceipts same for the same block", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, _ storage.ExecutionResults, _ storage.ExecutionReceipts, db storage.DB) { + t.Run("myReceipts store identical receipt for the same block", func(t *testing.T) { + withStore(t, func(myReceipts storage.MyExecutionReceipts, _ storage.ExecutionResults, _ storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { block := unittest.BlockFixture() - receipt1 := unittest.ReceiptForBlockFixture(&block) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) }) require.NoError(t, err) + lctx.Release() + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertOwnReceipt)) err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx2, receipt1, rw) }) require.NoError(t, err) + lctx2.Release() }) }) t.Run("store different receipt for same block should fail", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB) { + withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { block := unittest.BlockFixture() executor1 := unittest.IdentifierFixture() @@ -80,22 +93,27 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { receipt1 := unittest.ReceiptForBlockExecutorFixture(&block, executor1) receipt2 := unittest.ReceiptForBlockExecutorFixture(&block, executor2) + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) }) require.NoError(t, err) + lctx.Release() + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockInsertOwnReceipt)) err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt2, rw) + return myReceipts.BatchStoreMyReceipt(lctx2, receipt2, rw) }) - require.Error(t, err) require.Contains(t, err.Error(), "different receipt") + lctx2.Release() }) }) t.Run("concurrent store different receipt for same block should fail", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB) { + withStore(t, func(myReceipts 
storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { block := unittest.BlockFixture() executor1 := unittest.IdentifierFixture() @@ -104,30 +122,43 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { receipt1 := unittest.ReceiptForBlockExecutorFixture(&block, executor1) receipt2 := unittest.ReceiptForBlockExecutorFixture(&block, executor2) - var wg sync.WaitGroup + var startSignal sync.WaitGroup // goroutines attempting store operations will wait for this signal to start concurrently + startSignal.Add(1) // expecting one signal from the main thread to start both goroutines + var doneSinal sync.WaitGroup // the main thread will wait on this for both goroutines to finish + doneSinal.Add(2) // expecting two goroutines to signal finish errChan := make(chan error, 2) - wg.Add(2) - go func() { - defer wg.Done() + lctx := lockManager.NewContext() + + startSignal.Wait() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) }) errChan <- err + lctx.Release() + doneSinal.Done() }() go func() { - defer wg.Done() + lctx := lockManager.NewContext() + + startSignal.Wait() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt2, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw) }) errChan <- err + lctx.Release() + doneSinal.Done() }() - wg.Wait() + startSignal.Done() // start both goroutines + doneSinal.Wait() // wait for both goroutines to finish close(errChan) + // Check that one of the Index operations succeeded and the other failed var errCount int for err := range errChan { if err != nil { @@ -135,35 +166,39 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { require.Contains(t, err.Error(), "different receipt") } } - require.Equal(t, 1, errCount, "Exactly one of the operations should fail") }) }) t.Run("concurrent store of 10 different receipts for different blocks should succeed", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB) { - var wg sync.WaitGroup + withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { + var startSignal sync.WaitGroup // goroutines attempting store operations will wait for this signal to start concurrently + startSignal.Add(1) // expecting one signal from the main thread to start both goroutines + var doneSinal sync.WaitGroup // the main thread will wait on this for goroutines attempting store operations to finish errChan := make(chan error, 10) // Store receipts concurrently for i := 0; i < 10; i++ { - wg.Add(1) + doneSinal.Add(1) go func(i int) { - defer wg.Done() - block := unittest.BlockFixture() // Each iteration gets a new block executor := unittest.IdentifierFixture() receipt := unittest.ReceiptForBlockExecutorFixture(&block, executor) + lctx := lockManager.NewContext() + startSignal.Wait() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt, rw) + 
return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw) }) - errChan <- err + lctx.Release() + doneSinal.Done() }(i) } - wg.Wait() + startSignal.Done() // start both goroutines + doneSinal.Wait() // wait for both goroutines to finish close(errChan) // Verify all succeeded @@ -174,12 +209,15 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { }) t.Run("store and remove", func(t *testing.T) { - withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB) { + withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { block := unittest.BlockFixture() receipt1 := unittest.ReceiptForBlockFixture(&block) + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return myReceipts.BatchStoreMyReceipt(receipt1, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) }) require.NoError(t, err) @@ -210,6 +248,7 @@ func TestMyExecutionReceiptsStorage(t *testing.T) { func TestMyExecutionReceiptsStorageMultipleStoreInSameBatch(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() results := store.NewExecutionResults(metrics, db) receipts := store.NewExecutionReceipts(metrics, db, results, 100) @@ -219,12 +258,15 @@ func TestMyExecutionReceiptsStorageMultipleStoreInSameBatch(t *testing.T) { receipt1 := unittest.ReceiptForBlockFixture(&block) receipt2 := unittest.ReceiptForBlockFixture(&block) + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertOwnReceipt)) err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - err := myReceipts.BatchStoreMyReceipt(receipt1, rw) + err := myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) if err != nil { return err } - return myReceipts.BatchStoreMyReceipt(receipt2, rw) + return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw) }) require.NoError(t, err) }) diff --git a/storage/store/payloads.go b/storage/store/payloads.go new file mode 100644 index 00000000000..b88cc285032 --- /dev/null +++ b/storage/store/payloads.go @@ -0,0 +1,152 @@ +package store + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +type Payloads struct { + db storage.DB + index *Index + guarantees *Guarantees + seals *Seals + receipts *ExecutionReceipts + results *ExecutionResults +} + +func NewPayloads(db storage.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts, + results *ExecutionResults) *Payloads { + + p := &Payloads{ + db: db, + index: index, + guarantees: guarantees, + seals: seals, + receipts: receipts, + results: results, + } + + return p +} + +// storeTx stores the payloads and their components in the database. +// it takes a map of storingResults to ensure the receipt to be stored contains a known result, +// which is either already stored in the database or is going to be stored in the same batch. 
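For orientation, here is a minimal sketch of how the payload storage in this file is typically exercised end to end: a block is persisted through the `Blocks` store (which writes the payload via `Payloads.storeTx` as part of the same batch) and the payload is then read back by block ID. The helper name, the noop metrics collector, and the use of test fixtures are assumptions made for the example, mirroring the payloads test further below.

```go
package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
	"github.com/onflow/flow-go/utils/unittest"
)

// storeAndReadPayload persists a block while holding LockInsertBlock and reads the
// payload back by block ID. Illustrative sketch only.
func storeAndReadPayload(db storage.DB, lockManager lockctx.Manager) error {
	all := store.InitAll(metrics.NewNoopCollector(), db)

	block := unittest.BlockWithParentFixture(unittest.BlockHeaderWithHeight(10))
	block.SetPayload(unittest.PayloadFixture(unittest.WithAllTheFixins))

	lctx := lockManager.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
		return err
	}

	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return all.Blocks.BatchStore(lctx, rw, block)
	})
	if err != nil {
		return err
	}

	_, err = all.Payloads.ByBlockID(block.ID())
	return err
}
```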
+func (p *Payloads) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, payload *flow.Payload) error { + // For correct payloads, the execution result is part of the payload or it's already stored + // in storage. If execution result is not present in either of those places, we error: + resultsByID := payload.Results.Lookup() + fullReceipts := make([]*flow.ExecutionReceipt, 0, len(payload.Receipts)) + var err error + for _, meta := range payload.Receipts { + result, ok := resultsByID[meta.ResultID] + if !ok { + result, err = p.results.ByID(meta.ResultID) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("invalid payload referencing unknown execution result %v, err: %w", meta.ResultID, err) + } + return err + } + } + fullReceipts = append(fullReceipts, flow.ExecutionReceiptFromMeta(*meta, *result)) + } + + // make sure all payload guarantees are stored + for _, guarantee := range payload.Guarantees { + err := p.guarantees.storeTx(lctx, rw, guarantee) + if err != nil { + return fmt.Errorf("could not store guarantee: %w", err) + } + } + + // make sure all payload seals are stored + for _, seal := range payload.Seals { + err := p.seals.storeTx(rw, seal) + if err != nil { + return fmt.Errorf("could not store seal: %w", err) + } + } + + // store all payload receipts + for _, receipt := range fullReceipts { + err := p.receipts.storeTx(rw, receipt) + if err != nil { + return fmt.Errorf("could not store receipt: %w", err) + } + } + + // store the index + err = p.index.storeTx(lctx, rw, blockID, payload.Index()) + if err != nil { + return fmt.Errorf("could not store index: %w", err) + } + + return nil +} + +func (p *Payloads) retrieveTx(blockID flow.Identifier) (*flow.Payload, error) { + // retrieve the index + idx, err := p.index.ByBlockID(blockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve index: %w", err) + } + + // retrieve guarantees + guarantees := make([]*flow.CollectionGuarantee, 0, len(idx.CollectionIDs)) + for _, collID := range idx.CollectionIDs { + guarantee, err := p.guarantees.retrieveTx(collID) + if err != nil { + return nil, fmt.Errorf("could not retrieve guarantee (%x): %w", collID, err) + } + guarantees = append(guarantees, guarantee) + } + + // retrieve seals + seals := make([]*flow.Seal, 0, len(idx.SealIDs)) + for _, sealID := range idx.SealIDs { + seal, err := p.seals.retrieveTx(sealID) + if err != nil { + return nil, fmt.Errorf("could not retrieve seal (%x): %w", sealID, err) + } + seals = append(seals, seal) + } + + // retrieve receipts + receipts := make([]*flow.ExecutionReceiptMeta, 0, len(idx.ReceiptIDs)) + for _, recID := range idx.ReceiptIDs { + receipt, err := p.receipts.byID(recID) + if err != nil { + return nil, fmt.Errorf("could not retrieve receipt %x: %w", recID, err) + } + receipts = append(receipts, receipt.Meta()) + } + + // retrieve results + results := make([]*flow.ExecutionResult, 0, len(idx.ResultIDs)) + for _, resID := range idx.ResultIDs { + result, err := p.results.byID(resID) + if err != nil { + return nil, fmt.Errorf("could not retrieve result %x: %w", resID, err) + } + results = append(results, result) + } + payload := &flow.Payload{ + Seals: seals, + Guarantees: guarantees, + Receipts: receipts, + Results: results, + ProtocolStateID: idx.ProtocolStateID, + } + + return payload, nil +} + +func (p *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { + return p.retrieveTx(blockID) +} diff --git a/storage/store/payloads_test.go 
b/storage/store/payloads_test.go new file mode 100644 index 00000000000..b51863ac3ce --- /dev/null +++ b/storage/store/payloads_test.go @@ -0,0 +1,63 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestPayloadStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + + all := store.InitAll(metrics, db) + payloads := all.Payloads + blocks := all.Blocks + + expected := unittest.PayloadFixture(unittest.WithAllTheFixins) + block := unittest.BlockWithParentFixture(unittest.BlockHeaderWithHeight(10)) + block.SetPayload(expected) + require.Equal(t, &expected, block.Payload) + blockID := block.ID() + + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, block) + }) + require.NoError(t, err) + + // fetch payload + payload, err := payloads.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, &expected, payload) + }) +} + +func TestPayloadRetreiveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + + index := store.NewIndex(metrics, db) + seals := store.NewSeals(metrics, db) + guarantees := store.NewGuarantees(metrics, db, store.DefaultCacheSize) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, store.DefaultCacheSize) + s := store.NewPayloads(db, index, guarantees, seals, receipts, results) + + blockID := unittest.IdentifierFixture() + + _, err := s.ByBlockID(blockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/protocol_kv_store.go b/storage/store/protocol_kv_store.go new file mode 100644 index 00000000000..4b5745af525 --- /dev/null +++ b/storage/store/protocol_kv_store.go @@ -0,0 +1,192 @@ +package store + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// DefaultProtocolKVStoreCacheSize is the default size for primary protocol KV store cache. +// KV store is rarely updated, so we will have a limited number of unique snapshots. +// Let's be generous and assume we have 10 different KV stores used at the same time. +var DefaultProtocolKVStoreCacheSize uint = 10 + +// DefaultProtocolKVStoreByBlockIDCacheSize is the default value for secondary index `byBlockIdCache`. +// We want to be able to cover a broad interval of views without cache misses, so we use a bigger value. +// Generally, many blocks will reference the same KV store snapshot. +var DefaultProtocolKVStoreByBlockIDCacheSize uint = 1000 + +// ProtocolKVStore implements persistent storage for storing KV store snapshots. +type ProtocolKVStore struct { + db storage.DB + + // cache holds versioned binary blobs representing snapshots of key-value stores. 
We use the kv-store's + // ID as key for retrieving the versioned binary snapshot of the kv-store. Consumers must know how to + // deal with the binary representation. `cache` only holds the distinct snapshots. On the happy path, + // we expect single-digit number of unique snapshots within an epoch. + cache *Cache[flow.Identifier, *flow.PSKeyValueStoreData] + + // byBlockIdCache is essentially an in-memory map from `Block.ID()` -> `KeyValueStore.ID()`. The full + // kv-store snapshot can be retrieved from the `cache` above. + // `byBlockIdCache` will contain an entry for every block. We want to be able to cover a broad interval of views + // without cache misses, so a cache size of roughly 1000 entries is reasonable. + byBlockIdCache *Cache[flow.Identifier, flow.Identifier] +} + +var _ storage.ProtocolKVStore = (*ProtocolKVStore)(nil) + +// NewProtocolKVStore creates a ProtocolKVStore instance, which is a database holding KV store snapshots. +// It supports storing, caching and retrieving by ID or the additionally indexed block ID. +func NewProtocolKVStore(collector module.CacheMetrics, + db storage.DB, + kvStoreCacheSize uint, + kvStoreByBlockIDCacheSize uint, +) *ProtocolKVStore { + retrieveByStateID := func(r storage.Reader, stateID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + var kvStore flow.PSKeyValueStoreData + err := operation.RetrieveProtocolKVStore(r, stateID, &kvStore) + if err != nil { + return nil, fmt.Errorf("could not get kv snapshot by id (%x): %w", stateID, err) + } + return &kvStore, nil + } + storeByStateID := func(rw storage.ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error { + return operation.InsertProtocolKVStore(rw.Writer(), stateID, data) + } + + storeByBlockID := func(rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + err := operation.IndexProtocolKVStore(rw.Writer(), blockID, stateID) + if err != nil { + return fmt.Errorf("could not index protocol state for block (%x): %w", blockID[:], err) + } + return nil + } + + retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { + var stateID flow.Identifier + err := operation.LookupProtocolKVStore(r, blockID, &stateID) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) + } + return stateID, nil + } + + return &ProtocolKVStore{ + db: db, + cache: newCache(collector, metrics.ResourceProtocolKVStore, + withLimit[flow.Identifier, *flow.PSKeyValueStoreData](kvStoreCacheSize), + withStore(storeByStateID), + withRetrieve(retrieveByStateID)), + byBlockIdCache: newCache(collector, metrics.ResourceProtocolKVStoreByBlockID, + withLimit[flow.Identifier, flow.Identifier](kvStoreByBlockIDCacheSize), + withStore(storeByBlockID), + withRetrieve(retrieveByBlockID)), + } +} + +// BatchStore persists the KV-store snapshot in the database using the given ID as key. +// BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). +// Here, the ID is expected to be a collision-resistant hash of the snapshot (including the +// ProtocolStateVersion). Hence, for the same ID, BatchStore will reject changing the data. 
+// Expected errors during normal operations:
+//   - [storage.ErrDataMismatch] if a _different_ KV store snapshot has already been persisted for the given stateID
+func (s *ProtocolKVStore) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error {
+	if !lctx.HoldsLock(storage.LockInsertBlock) {
+		return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock)
+	}
+
+	existingData, err := s.ByID(stateID)
+	if err == nil {
+		if existingData.Equal(data) {
+			return nil
+		}
+
+		return fmt.Errorf("kv-store snapshot with id (%x) already exists but is different, ([%v,%x] != [%v,%x]): %w", stateID[:],
+			data.Version, data.Data,
+			existingData.Version, existingData.Data,
+			storage.ErrDataMismatch)
+	}
+	if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as it indicates that no snapshot is stored under this ID yet; anything else is an exception
+		return fmt.Errorf("unexpected error checking if kv-store snapshot %x exists: %w", stateID[:], irrecoverable.NewException(err))
+	}
+
+	return s.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return s.cache.PutTx(rw, stateID, data)
+	})
+}
+
+// BatchIndex appends the following operation to the provided write batch:
+// we extend the map from `blockID` to `stateID`, where `blockID` references the
+// block that _proposes_ the updated key-value store.
+// BatchIndex is idempotent, i.e. it accepts repeated calls with the same pairs of (blockID, stateID).
+// Per protocol convention, the block references the `stateID`. As the `blockID` is a collision-resistant hash,
+// for the same `blockID`, BatchIndex will reject changing the data.
+// Protocol convention:
+//   - Consider block B, whose ingestion might potentially lead to an updated KV store. For example,
+//     the KV store changes if we seal some execution results emitting specific service events.
+//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store.
+//   - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block,
+//     _after_ validating the QC.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrDataMismatch] if a _different_ stateID has already been indexed for the given blockID
+func (s *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertBlock) {
+		return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock)
+	}
+
+	existingStateID, err := s.byBlockIdCache.Get(s.db.Reader(), blockID)
+	if err == nil {
+		// no-op if the *same* stateID is already indexed for the blockID
+		if existingStateID == stateID {
+			return nil
+		}
+
+		return fmt.Errorf("kv-store snapshot with block id (%x) already exists but is different (%v != %v): %w",
+			blockID[:],
+			stateID, existingStateID,
+			storage.ErrDataMismatch)
+	}
+	if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as it indicates that no stateID is indexed for this block yet; anything else is an exception
+		return fmt.Errorf("could not check if kv-store snapshot with block id (%x) exists: %w", blockID[:], irrecoverable.NewException(err))
+	}
+
+	return s.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return s.byBlockIdCache.PutTx(rw, blockID, stateID)
+	})
+}
+
+// ByID retrieves the KV store snapshot with the given state ID.
+// Expected errors during normal operations: +// - storage.ErrNotFound if no snapshot with the given Identifier is known. +func (s *ProtocolKVStore) ByID(stateID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + return s.cache.Get(s.db.Reader(), stateID) +} + +// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes. +// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. Protocol convention: +// - Consider block B, whose ingestion might potentially lead to an updated KV store state. +// For example, the state changes if we seal some execution results emitting specific service events. +// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value, +// the hash of the resulting state at the end of processing B is to be used. +// - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if no snapshot has been indexed for the given block. +func (s *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + stateID, err := s.byBlockIdCache.Get(s.db.Reader(), blockID) + if err != nil { + return nil, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) + } + return s.cache.Get(s.db.Reader(), stateID) +} diff --git a/storage/store/protocol_kv_store_test.go b/storage/store/protocol_kv_store_test.go new file mode 100644 index 00000000000..3b42f20bd43 --- /dev/null +++ b/storage/store/protocol_kv_store_test.go @@ -0,0 +1,163 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TesKeyValueStoreStorage tests if the KV store is stored, retrieved and indexed correctly +func TestKeyValueStoreStorage(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + stateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + // store protocol state and auxiliary info + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := store.BatchStore(lctx, rw, stateID, expected) + require.NoError(t, err) + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + require.NoError(t, err) + defer lctx.Release() // While still holding the lock, retrieve values; this verifies that reads are not blocked by acquired locks + + // fetch protocol state by its own ID + actual, err := store.ByID(stateID) + require.NoError(t, err) + assert.Equal(t, expected, actual) + + // fetch protocol state index by the block ID + actualByBlockID, err := store.ByBlockID(blockID) + require.NoError(t, err) + assert.Equal(t, expected, actualByBlockID) + }) +} + +// TestProtocolKVStore_StoreTx tests that 
StoreTx handles storage request correctly, when a snapshot with +// the given id has already been stored: +// - if the KV-store snapshot is exactly the same as the one already stored (incl. the version), `BatchStore` should return without an error +// - if we request to store a _different_ KV-store snapshot, an `storage.ErrDataMismatch` should be returned. +func TestProtocolKVStore_StoreTx(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, stateID, expected) + }) + require.NoError(t, err) + defer lctx.Release() // While still holding the lock, retrieve values; this verifies that reads are not blocked by acquired locks + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, stateID, expected) + }) + // No error when storing same data again + require.NoError(t, err) + + // Attempt to store different data with the same stateID + dataDifferent := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, stateID, dataDifferent) + }) + require.ErrorIs(t, err, storage.ErrDataMismatch) + + // Attempt to store different version with the same stateID + versionDifferent := &flow.PSKeyValueStoreData{ + Version: 3, + Data: expected.Data, + } + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, stateID, versionDifferent) + }) + require.ErrorIs(t, err, storage.ErrDataMismatch) + }) +} + +// TestProtocolKVStore_IndexTx tests that IndexTx handles storage request correctly, when a snapshot with +// the given id has already been indexed: +// - if the KV-store ID is exactly the same as the one already indexed, `BatchIndex` should return without an error +// - if we request to index a different ID, an `storage.ErrDataMismatch` should be returned. 
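For illustration, a production-style caller of the two batch methods above might look like the following sketch, which mirrors `TestKeyValueStoreStorage`: the snapshot is persisted and indexed under the proposing block in a single batch while `storage.LockInsertBlock` is held. The helper name and the choice to pass the concrete `*store.ProtocolKVStore` from outside are assumptions made for the example only.

```go
package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
)

// persistAndIndexKVStore writes a KV-store snapshot and its block index in one atomic
// batch, holding LockInsertBlock for the duration of the write. Illustrative sketch only.
func persistAndIndexKVStore(
	db storage.DB,
	kvStore *store.ProtocolKVStore,
	lockManager lockctx.Manager,
	blockID flow.Identifier,
	stateID flow.Identifier,
	data *flow.PSKeyValueStoreData,
) error {
	lctx := lockManager.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
		return err
	}

	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		if err := kvStore.BatchStore(lctx, rw, stateID, data); err != nil {
			return err
		}
		return kvStore.BatchIndex(lctx, rw, blockID, stateID)
	})
}
```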
+func TestProtocolKVStore_IndexTx(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + require.NoError(t, err) + defer lctx.Release() // While still holding the lock, retrieve values; this verifies that reads are not blocked by acquired locks + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + require.NoError(t, err) + + differentStateID := unittest.IdentifierFixture() + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, differentStateID) + }) + require.ErrorIs(t, err, storage.ErrDataMismatch) + }) +} + +// TestProtocolKVStore_ByBlockID tests that ByBlockID returns an error if no snapshot has been indexed for the given block. +func TestProtocolKVStore_ByBlockID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + blockID := unittest.IdentifierFixture() + _, err := store.ByBlockID(blockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestProtocolKVStore_ByID tests that ByID returns an error if no snapshot with the given Identifier is known. +func TestProtocolKVStore_ByID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + _, err := store.ByID(stateID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/qcs.go b/storage/store/qcs.go new file mode 100644 index 00000000000..f5a9e57a9fd --- /dev/null +++ b/storage/store/qcs.go @@ -0,0 +1,63 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// QuorumCertificates implements persistent storage for quorum certificates. +type QuorumCertificates struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.QuorumCertificate] +} + +var _ storage.QuorumCertificates = (*QuorumCertificates)(nil) + +// NewQuorumCertificates Creates QuorumCertificates instance which is a database of quorum certificates +// which supports storing, caching and retrieving by block ID. 
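Because the `BatchStore` method defined below reports `storage.ErrAlreadyExists` when some QC for the block has already been stored (documented there as typically benign), a caller that only cares that the block is certified can swallow that error. The following sketch is illustrative only; the helper name and the decision to ignore the error are assumptions about the caller, not behavior introduced by this PR.

```go
package example

import (
	"errors"
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
)

// storeQC persists a QC while holding LockInsertBlock and treats storage.ErrAlreadyExists
// as benign, since any previously stored QC certifies the same block. Illustrative sketch only.
func storeQC(db storage.DB, qcs *store.QuorumCertificates, lockManager lockctx.Manager, qc *flow.QuorumCertificate) error {
	lctx := lockManager.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
		return err
	}

	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return qcs.BatchStore(lctx, rw, qc)
	})
	if errors.Is(err, storage.ErrAlreadyExists) {
		return nil // benign: some QC for this block is already stored
	}
	if err != nil {
		return fmt.Errorf("could not store qc for block %v: %w", qc.BlockID, err)
	}
	return nil
}
```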
+func NewQuorumCertificates(collector module.CacheMetrics, db storage.DB, cacheSize uint) *QuorumCertificates {
+	storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, _ flow.Identifier, qc *flow.QuorumCertificate) error {
+		return operation.InsertQuorumCertificate(lctx, rw, qc)
+	}
+
+	retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.QuorumCertificate, error) {
+		var qc flow.QuorumCertificate
+		err := operation.RetrieveQuorumCertificate(r, blockID, &qc)
+		return &qc, err
+	}
+
+	return &QuorumCertificates{
+		db: db,
+		cache: newCache(collector, metrics.ResourceQC,
+			withLimit[flow.Identifier, *flow.QuorumCertificate](cacheSize),
+			withStoreWithLock(storeWithLock),
+			withRetrieve(retrieve)),
+	}
+}
+
+// BatchStore stores a Quorum Certificate as part of a database batch update. The QC is indexed by QC.BlockID.
+//
+// Note: For the same block, different QCs can easily be constructed by selecting different subsets of the received votes
+// (provided more than the minimal number of consensus participants voted, which is typically the case). In most cases, it
+// is only important that a block has been certified, but irrelevant who specifically contributed to the QC. Therefore, we
+// only store the first QC.
+//
+// If *any* quorum certificate for QC.BlockID has already been stored, a `storage.ErrAlreadyExists` is returned (typically benign).
+func (q *QuorumCertificates) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, qc *flow.QuorumCertificate) error {
+	return q.cache.PutWithLockTx(lctx, rw, qc.BlockID, qc)
+}
+
+// ByBlockID returns the QC that certifies the block referred to by blockID.
+// Expected errors during normal operations:
+// * storage.ErrNotFound if no QC for blockID exists.
+func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) {
+	val, err := q.cache.Get(q.db.Reader(), blockID)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
diff --git a/storage/store/qcs_test.go b/storage/store/qcs_test.go new file mode 100644 index 00000000000..474ea7a461e --- /dev/null +++ b/storage/store/qcs_test.go @@ -0,0 +1,107 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestQuorumCertificates_StoreTx tests storing and retrieving of a QC.
+func TestQuorumCertificates_StoreTx(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		store := store.NewQuorumCertificates(metrics, db, 10)
+		qc := unittest.QuorumCertificateFixture()
+
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock))
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return store.BatchStore(lctx, rw, qc)
+		})
+		require.NoError(t, err)
+
+		actual, err := store.ByBlockID(qc.BlockID)
+		require.NoError(t, err)
+
+		require.Equal(t, qc, actual)
+	})
+}
+
+// TestQuorumCertificates_LockEnforced verifies that storing a QC requires holding the
+// storage.LockInsertBlock lock. If the lock is not held, `BatchStore` should error.
+func TestQuorumCertificates_LockEnforced(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := store.NewQuorumCertificates(metrics, db, 10) + qc := unittest.QuorumCertificateFixture() + + // acquire wrong lock and attempt to store QC: should error + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockFinalizeBlock)) // INCORRECT LOCK + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, qc) + }) + require.Error(t, err) + lctx.Release() + + // qc should not be stored, so ByBlockID should return `storage.ErrNotFound` + _, err = store.ByBlockID(qc.BlockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestQuorumCertificates_StoreTx_OtherQC checks if storing other QC for same blockID results in +// `storage.ErrAlreadyExists` and already stored value is not overwritten. +func TestQuorumCertificates_StoreTx_OtherQC(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + s := store.NewQuorumCertificates(metrics, db, 10) + qc := unittest.QuorumCertificateFixture() + otherQC := unittest.QuorumCertificateFixture(func(otherQC *flow.QuorumCertificate) { + otherQC.View = qc.View + otherQC.BlockID = qc.BlockID + }) + + lctx := lockManager.NewContext() + defer lctx.Release() + require.NoError(t, lctx.AcquireLock(storage.LockInsertBlock)) + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(lctx, rw, qc) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(lctx, rw, otherQC) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + actual, err := s.ByBlockID(otherQC.BlockID) + require.NoError(t, err) + + require.Equal(t, qc, actual) + }) +} + +// TestQuorumCertificates_ByBlockID that ByBlockID returns correct sentinel error if no QC for given block ID has been found +func TestQuorumCertificates_ByBlockID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := store.NewQuorumCertificates(metrics, db, 10) + + actual, err := store.ByBlockID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, actual) + }) +} diff --git a/storage/store/receipts.go b/storage/store/receipts.go index 679ab23a51b..a9b0362c999 100644 --- a/storage/store/receipts.go +++ b/storage/store/receipts.go @@ -1,7 +1,6 @@ package store import ( - "errors" "fmt" "github.com/onflow/flow-go/model/flow" @@ -78,7 +77,7 @@ func (r *ExecutionReceipts) byID(receiptID flow.Identifier) (*flow.ExecutionRece func (r *ExecutionReceipts) byBlockID(blockID flow.Identifier) ([]*flow.ExecutionReceipt, error) { var receiptIDs []flow.Identifier err := operation.LookupExecutionReceipts(r.db.Reader(), blockID, &receiptIDs) - if err != nil && !errors.Is(err, storage.ErrNotFound) { + if err != nil { return nil, fmt.Errorf("could not find receipt index for block: %w", err) } @@ -107,6 +106,9 @@ func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionRece return r.byID(receiptID) } +// ByBlockID retrieves list of execution receipts from the storage +// +// No errors are expected errors during normal operations. 
func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) { return r.byBlockID(blockID) } diff --git a/storage/store/results.go b/storage/store/results.go index 696b6d114c8..45c269f5a7f 100644 --- a/storage/store/results.go +++ b/storage/store/results.go @@ -7,7 +7,6 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/transaction" "github.com/onflow/flow-go/storage/operation" ) @@ -117,13 +116,6 @@ func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult return r.byID(resultID) } -// TODO: deprecated, should be removed when protocol data is moved pebble -func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(tx *transaction.Tx) (*flow.ExecutionResult, error) { - return func(tx *transaction.Tx) (*flow.ExecutionResult, error) { - return nil, fmt.Errorf("not implemented") - } -} - // Index indexes an execution result by block ID. // Note: this method call is not concurrent safe, because it checks if the different result is already indexed // by the same blockID, and if it is, it returns an error. diff --git a/storage/badger/seals.go b/storage/store/seals.go similarity index 54% rename from storage/badger/seals.go rename to storage/store/seals.go index 064ce3d3d54..884e17d68aa 100644 --- a/storage/badger/seals.go +++ b/storage/store/seals.go @@ -1,34 +1,30 @@ -package badger +package store import ( "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" ) type Seals struct { - db *badger.DB + db storage.DB cache *Cache[flow.Identifier, *flow.Seal] } -func NewSeals(collector module.CacheMetrics, db *badger.DB) *Seals { +func NewSeals(collector module.CacheMetrics, db storage.DB) *Seals { - store := func(sealID flow.Identifier, seal *flow.Seal) func(*transaction.Tx) error { - return transaction.WithTx(operation.SkipDuplicates(operation.InsertSeal(sealID, seal))) + store := func(rw storage.ReaderBatchWriter, sealID flow.Identifier, seal *flow.Seal) error { + return operation.InsertSeal(rw.Writer(), sealID, seal) } - retrieve := func(sealID flow.Identifier) func(*badger.Txn) (*flow.Seal, error) { - return func(tx *badger.Txn) (*flow.Seal, error) { - var seal flow.Seal - err := operation.RetrieveSeal(sealID, &seal)(tx) - return &seal, err - } + retrieve := func(r storage.Reader, sealID flow.Identifier) (*flow.Seal, error) { + var seal flow.Seal + err := operation.RetrieveSeal(r, sealID, &seal) + return &seal, err } s := &Seals{ @@ -42,28 +38,26 @@ func NewSeals(collector module.CacheMetrics, db *badger.DB) *Seals { return s } -func (s *Seals) storeTx(seal *flow.Seal) func(*transaction.Tx) error { - return s.cache.PutTx(seal.ID(), seal) +func (s *Seals) storeTx(rw storage.ReaderBatchWriter, seal *flow.Seal) error { + return s.cache.PutTx(rw, seal.ID(), seal) } -func (s *Seals) retrieveTx(sealID flow.Identifier) func(*badger.Txn) (*flow.Seal, error) { - return func(tx *badger.Txn) (*flow.Seal, error) { - val, err := s.cache.Get(sealID)(tx) - if err != nil { - return nil, err - } - return val, err +func (s *Seals) retrieveTx(sealID flow.Identifier) (*flow.Seal, error) { + val, err := s.cache.Get(s.db.Reader(), 
sealID) + if err != nil { + return nil, err } + return val, err } func (s *Seals) Store(seal *flow.Seal) error { - return operation.RetryOnConflictTx(s.db, transaction.Update, s.storeTx(seal)) + return s.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.storeTx(rw, seal) + }) } func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - return s.retrieveTx(sealID)(tx) + return s.retrieveTx(sealID) } // HighestInFork retrieves the highest seal that was included in the @@ -72,7 +66,7 @@ func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { // blockID is unknown. func (s *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { var sealID flow.Identifier - err := s.db.View(operation.LookupLatestSealAtBlock(blockID, &sealID)) + err := operation.LookupLatestSealAtBlock(s.db.Reader(), blockID, &sealID) if err != nil { return nil, fmt.Errorf("failed to retrieve seal for fork with head %x: %w", blockID, err) } @@ -84,7 +78,7 @@ func (s *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { // Returns storage.ErrNotFound if the block is unknown or unsealed. func (s *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error) { var sealID flow.Identifier - err := s.db.View(operation.LookupBySealedBlockID(blockID, &sealID)) + err := operation.LookupBySealedBlockID(s.db.Reader(), blockID, &sealID) if err != nil { return nil, fmt.Errorf("failed to retrieve seal for block %x: %w", blockID, err) } diff --git a/storage/badger/seals_test.go b/storage/store/seals_test.go similarity index 56% rename from storage/badger/seals_test.go rename to storage/store/seals_test.go index 7c8aa3907b8..b9c4961c803 100644 --- a/storage/badger/seals_test.go +++ b/storage/store/seals_test.go @@ -1,73 +1,80 @@ -package badger_test +package store_test import ( "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" ) func TestRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) + s := store.NewSeals(metrics, db) - _, err := store.ByID(unittest.IdentifierFixture()) + _, err := s.ByID(unittest.IdentifierFixture()) require.ErrorIs(t, err, storage.ErrNotFound) - _, err = store.HighestInFork(unittest.IdentifierFixture()) + _, err = s.HighestInFork(unittest.IdentifierFixture()) require.ErrorIs(t, err, storage.ErrNotFound) }) } // TestSealStoreRetrieve verifies that a seal can be stored and retrieved by its ID func TestSealStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) + s := store.NewSeals(metrics, db) expected := unittest.Seal.Fixture() // store seal - err := store.Store(expected) + err := s.Store(expected) require.NoError(t, err) // retrieve seal - seal, err := store.ByID(expected.ID()) + seal, err := s.ByID(expected.ID()) 
require.NoError(t, err) require.Equal(t, expected, seal) }) } // TestSealIndexAndRetrieve verifies that: -// - for a block, we can store (aka index) the latest sealed block along this fork. +// - for a block, we can s (aka index) the latest sealed block along this fork. // // Note: indexing the seal for a block is currently implemented only through a direct // Badger operation. The Seals mempool only supports retrieving the latest sealed block. func TestSealIndexAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + lctx := lockManager.NewContext() + err := lctx.AcquireLock(storage.LockInsertBlock) + require.NoError(t, err) + defer lctx.Release() + metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) + s := store.NewSeals(metrics, db) expectedSeal := unittest.Seal.Fixture() blockID := unittest.IdentifierFixture() // store the seal first - err := store.Store(expectedSeal) + err = s.Store(expectedSeal) require.NoError(t, err) // index the seal ID for the heighest sealed block in this fork - err = operation.RetryOnConflict(db.Update, operation.IndexLatestSealAtBlock(blockID, expectedSeal.ID())) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, expectedSeal.ID()) + }) require.NoError(t, err) // retrieve latest seal - seal, err := store.HighestInFork(blockID) + seal, err := s.HighestInFork(blockID) require.NoError(t, err) require.Equal(t, expectedSeal, seal) }) @@ -76,24 +83,26 @@ func TestSealIndexAndRetrieve(t *testing.T) { // TestSealedBlockIndexAndRetrieve checks after indexing a seal by a sealed block ID, it can be // retrieved by the sealed block ID func TestSealedBlockIndexAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) + s := store.NewSeals(metrics, db) expectedSeal := unittest.Seal.Fixture() blockID := unittest.IdentifierFixture() expectedSeal.BlockID = blockID // store the seal first - err := store.Store(expectedSeal) + err := s.Store(expectedSeal) require.NoError(t, err) // index the seal ID for the highest sealed block in this fork - err = operation.RetryOnConflict(db.Update, operation.IndexFinalizedSealByBlockID(expectedSeal.BlockID, expectedSeal.ID())) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedSealByBlockID(rw.Writer(), expectedSeal.BlockID, expectedSeal.ID()) + }) require.NoError(t, err) // retrieve latest seal - seal, err := store.FinalizedSealForBlock(blockID) + seal, err := s.FinalizedSealForBlock(blockID) require.NoError(t, err) require.Equal(t, expectedSeal, seal) }) diff --git a/storage/store/transaction_result_error_messages.go b/storage/store/transaction_result_error_messages.go index 4c3ce14dd9b..6f315216aea 100644 --- a/storage/store/transaction_result_error_messages.go +++ b/storage/store/transaction_result_error_messages.go @@ -76,7 +76,7 @@ func NewTransactionResultErrorMessages(collector module.CacheMetrics, db storage // No errors are expected during normal operation. 
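Since this PR exports `BatchStore` on the `TransactionResultErrorMessages` interface (see the storage interface change further below), callers can append the error messages to a batch they already manage rather than going through `Store`. A minimal sketch, assuming a hypothetical helper name:

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// persistTxErrorMessages writes the transaction result error messages for a block as part
// of a caller-managed batch, so they commit atomically with any other writes appended to rw.
// Illustrative sketch only.
func persistTxErrorMessages(
	db storage.DB,
	messages storage.TransactionResultErrorMessages,
	blockID flow.Identifier,
	errorMessages []flow.TransactionResultErrorMessage,
) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		// other writes for the same block could be added to rw here
		return messages.BatchStore(blockID, errorMessages, rw)
	})
}
```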
func (t *TransactionResultErrorMessages) Store(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error { return t.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { - return t.batchStore(blockID, transactionResultErrorMessages, rw) + return t.BatchStore(blockID, transactionResultErrorMessages, rw) }) } @@ -101,7 +101,7 @@ func (t *TransactionResultErrorMessages) Exists(blockID flow.Identifier) (bool, // BatchStore inserts a batch of transaction result error messages into a batch // // No errors are expected during normal operation. -func (t *TransactionResultErrorMessages) batchStore( +func (t *TransactionResultErrorMessages) BatchStore( blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage, batch storage.ReaderBatchWriter, @@ -163,7 +163,7 @@ func (t *TransactionResultErrorMessages) ByBlockIDTransactionIndex(blockID flow. // ByBlockID gets all transaction result error messages for a block, ordered by transaction index. // Note: This method will return an empty slice both if the block is not indexed yet and if the block does not have any errors. // -// No errors are expected during normal operation. +// No errors are expected during normal operations. func (t *TransactionResultErrorMessages) ByBlockID(blockID flow.Identifier) ([]flow.TransactionResultErrorMessage, error) { transactionResultErrorMessages, err := t.blockCache.Get(t.db.Reader(), blockID) if err != nil { diff --git a/storage/store/transactions.go b/storage/store/transactions.go index 9f43a29b969..f2357ca33ab 100644 --- a/storage/store/transactions.go +++ b/storage/store/transactions.go @@ -1,6 +1,8 @@ package store import ( + "fmt" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" @@ -63,3 +65,13 @@ func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) func (t *Transactions) RemoveBatch(rw storage.ReaderBatchWriter, txID flow.Identifier) error { return t.cache.RemoveTx(rw, txID) } + +// BatchStore stores transaction within a batch operation. +// No errors are expected during normal operations +func (t *Transactions) BatchStore(tx *flow.TransactionBody, batch storage.ReaderBatchWriter) error { + if err := t.storeTx(batch, tx); err != nil { + return fmt.Errorf("cannot batch insert transaction: %w", err) + } + + return nil +} diff --git a/storage/transaction_result_error_messages.go b/storage/transaction_result_error_messages.go index 2b613c67428..a573bbae8a2 100644 --- a/storage/transaction_result_error_messages.go +++ b/storage/transaction_result_error_messages.go @@ -24,7 +24,7 @@ type TransactionResultErrorMessagesReader interface { // ByBlockID gets all transaction result error messages for a block, ordered by transaction index. // Note: This method will return an empty slice both if the block is not indexed yet and if the block does not have any errors. // - // No errors are expected during normal operation. + // No errors are expected during normal operations. ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) } @@ -36,4 +36,9 @@ type TransactionResultErrorMessages interface { // // No errors are expected during normal operation. Store(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error + + // BatchStore inserts a batch of transaction result error messages into a batch + // + // No errors are expected during normal operation. 
+ BatchStore(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage, batch ReaderBatchWriter) error } diff --git a/storage/transactions.go b/storage/transactions.go index 141655280c2..19783098480 100644 --- a/storage/transactions.go +++ b/storage/transactions.go @@ -19,4 +19,8 @@ type Transactions interface { // Store inserts the transaction, keyed by fingerprint. Duplicate transaction insertion is ignored // No errors are expected during normal operation. Store(tx *flow.TransactionBody) error + + // BatchStore stores transaction within a batch operation. + // No errors are expected during normal operation. + BatchStore(tx *flow.TransactionBody, batch ReaderBatchWriter) error } diff --git a/storage/util/logger.go b/storage/util/logger.go index c57173e70f8..6a3cd914e97 100644 --- a/storage/util/logger.go +++ b/storage/util/logger.go @@ -1,7 +1,7 @@ package util import ( - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" ) diff --git a/storage/util/testing.go b/storage/util/testing.go index 3ca4df75aab..4842cb342b3 100644 --- a/storage/util/testing.go +++ b/storage/util/testing.go @@ -10,7 +10,6 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) @@ -21,9 +20,9 @@ func ExecutionStorageLayer(_ testing.TB, bdb *badger.DB) *storage.Execution { db := badgerimpl.ToDB(bdb) results := store.NewExecutionResults(metrics, db) - receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) + receipts := store.NewExecutionReceipts(metrics, db, results, store.DefaultCacheSize) commits := store.NewCommits(metrics, db) - transactionResults := store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) + transactionResults := store.NewTransactionResults(metrics, db, store.DefaultCacheSize) events := store.NewEvents(metrics, db) return &storage.Execution{ Results: results, diff --git a/utils/unittest/epoch_builder.go b/utils/unittest/epoch_builder.go index 622ec5ee05d..bc755b28868 100644 --- a/utils/unittest/epoch_builder.go +++ b/utils/unittest/epoch_builder.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage/deferred" ) // EpochHeights is a structure caching the results of building an epoch with @@ -355,7 +356,8 @@ func (builder *EpochBuilder) CompleteEpoch() *EpochBuilder { // addBlock adds the given block to the state by: extending the state, // finalizing the block, and caching the block. 
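The `addBlock` change below switches `EvolveState` to the new deferred-persist signature, where database operations are collected into a `DeferredBlockPersist` instead of being returned directly. A minimal sketch of that call pattern follows; the helper name, the `protocol.MutableProtocolState` parameter type, and the pointer return type of `deferred.NewDeferredBlockPersist` are assumptions made for illustration.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/storage/deferred"
)

// evolveProtocolState evolves the protocol state for a candidate block. The resulting
// database operations accumulate in dbUpdates and are expected to be applied as part of
// the block's write batch by the caller. Illustrative sketch only.
func evolveProtocolState(mutableState protocol.MutableProtocolState, block *flow.Block) (flow.Identifier, *deferred.DeferredBlockPersist, error) {
	dbUpdates := deferred.NewDeferredBlockPersist()
	updatedStateID, err := mutableState.EvolveState(dbUpdates, block.Header.ParentID, block.Header.View, block.Payload.Seals)
	if err != nil {
		return flow.ZeroID, nil, fmt.Errorf("could not evolve protocol state: %w", err)
	}
	return updatedStateID, dbUpdates, nil
}
```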
func (builder *EpochBuilder) addBlock(block *flow.Block) { - updatedStateId, dbUpdates, err := builder.mutableProtocolState.EvolveState(block.Header.ParentID, block.Header.View, block.Payload.Seals) + dbUpdates := deferred.NewDeferredBlockPersist() + updatedStateId, err := builder.mutableProtocolState.EvolveState(dbUpdates, block.Header.ParentID, block.Header.View, block.Payload.Seals) require.NoError(builder.t, err) require.False(builder.t, dbUpdates.IsEmpty()) diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index c3ff2e69924..11ce1320f64 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -23,7 +23,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "585abb38a755f5514ab8f2386f1831d346b2a7fe2415f1a51dc998cdce4b0bc0" +const GenesisStateCommitmentHex = "70472141c0dffcd88f7785a95a96c3e27813817ea77b9258519af8433b2bd7f8" var GenesisStateCommitment flow.StateCommitment @@ -87,10 +87,10 @@ func genesisCommitHexByChainID(chainID flow.ChainID) string { return GenesisStateCommitmentHex } if chainID == flow.Testnet { - return "a3daaf13edf55851984c38eb82212260f7e0237bed47b73c55a767b3df3c5614" + return "389f66301e315aaa37562878e13ec78fb978cc3e4b16ea5c1fd7ba9277335096" } if chainID == flow.Sandboxnet { return "e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1" } - return "184f9a0deecb20a5af3e9379d585bd1bf1bbb36002ba660c481d0bee618582c0" + return "ef7eaedb67a9d23bb3a9bc0bd8021287b650354e1a12c92c304ea36419450f1a" } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index b900158d852..34d07c82a80 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -53,8 +53,7 @@ import ( ) const ( - DefaultSeedFixtureLength = 64 - DefaultAddress = "localhost:0" + DefaultAddress = "localhost:0" ) // returns a deterministic math/rand PRG that can be used for deterministic randomness in tests only. @@ -380,6 +379,32 @@ func BlockWithParentFixture(parent *flow.Header) *flow.Block { } } +// BlockWithParentAndUniqueView creates a child block of the given parent. +// We provide a set of views that are _not_ allowed to be used for the new block. A typical usage +// scenario is to create blocks of different forks, without accidentally creating two blocks with +// the same view. +// CAUTION: +// - modifies the set `forbiddenViews` by adding the view of the newly created block. +// - To generate the child's view, we randomly select a small increment and add it to the +// parent's view. 
If the set of views covers all possible increments, this function will panic +func BlockWithParentAndUniqueView(parent *flow.Header, forbiddenViews map[uint64]struct{}) *flow.Block { + var block *flow.Block + counter := 0 + for { + block = BlockWithParentFixture(parent) + if _, hasForbiddenView := forbiddenViews[block.Header.View]; !hasForbiddenView { + break + } + counter += 1 + if counter > 20 { + panic(fmt.Sprintf("BlockWithParentAndUniqueView failed to generate child despite %d attempts", counter)) + } + } + // block has a view that is not forbidden: + forbiddenViews[block.Header.View] = struct{}{} // add the block's view to `forbiddenViews` to prevent future re-usage + return block +} + func BlockWithParentProtocolState(parent *flow.Block) *flow.Block { payload := PayloadFixture(WithProtocolStateID(parent.Payload.ProtocolStateID)) header := BlockHeaderWithParentFixture(parent.Header) @@ -390,6 +415,33 @@ func BlockWithParentProtocolState(parent *flow.Block) *flow.Block { } } +// BlockWithParentProtocolStateAndUniqueView creates a child block of the given parent, such that +// the child's protocol state is the same as the parent's. +// We provide a set of views that are _not_ allowed to be used for the new block. A typical usage +// scenario is to create blocks of different forks, without accidentally creating two blocks with +// the same view. +// CAUTION: +// - modifies the set `forbiddenViews` by adding the view of the newly created block. +// - To generate the child's view, we randomly select a small increment and add it to the +// parent's view. If the set of views covers all possible increments, this function will panic +func BlockWithParentProtocolStateAndUniqueView(parent *flow.Block, forbiddenViews map[uint64]struct{}) *flow.Block { + var block *flow.Block + counter := 0 + for { + block = BlockWithParentProtocolState(parent) + if _, hasForbiddenView := forbiddenViews[block.Header.View]; !hasForbiddenView { + break + } + counter += 1 + if counter > 20 { + panic(fmt.Sprintf("BlockWithParentProtocolStateAndUniqueView failed to generate child despite %d attempts", counter)) + } + } + // block has a view that is not forbidden: + forbiddenViews[block.Header.View] = struct{}{} // add the block's view to `forbiddenViews` to prevent future re-usage + return block +} + func BlockWithGuaranteesFixture(guarantees []*flow.CollectionGuarantee) *flow.Block { payload := PayloadFixture(WithGuarantees(guarantees...)) header := BlockHeaderFixture() @@ -1149,8 +1201,28 @@ func NodeConfigFixture(opts ...func(*flow.Identity)) bootstrap.NodeConfig { } func NodeInfoFixture(opts ...func(*flow.Identity)) bootstrap.NodeInfo { - opts = append(opts, WithKeys) - return bootstrap.NodeInfoFromIdentity(IdentityFixture(opts...)) + nodes := NodeInfosFixture(1, opts...) + return nodes[0] +} + +// NodeInfoFromIdentity converts an identity to a public NodeInfo +// WARNING: the function replaces the staking key from the identity by a freshly generated one. 
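To make the intended use of the unique-view block fixtures above concrete, here is a small sketch that builds two conflicting forks off the same parent while sharing one `forbiddenViews` set, so the forks can never collide on a view. The helper name and the choice to seed the set with the parent's view are assumptions made for the example.

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

// twoForksWithDistinctViews creates two conflicting children of the same parent. Because both
// calls share the forbiddenViews set, the second child is guaranteed to get a view that the
// first child did not use. Illustrative sketch only.
func twoForksWithDistinctViews(parent *flow.Block) (*flow.Block, *flow.Block) {
	forbiddenViews := map[uint64]struct{}{parent.Header.View: {}}
	forkA := unittest.BlockWithParentAndUniqueView(parent.Header, forbiddenViews)
	forkB := unittest.BlockWithParentAndUniqueView(parent.Header, forbiddenViews)
	return forkA, forkB
}
```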
+func NodeInfoFromIdentity(identity *flow.Identity) bootstrap.NodeInfo { + stakingSK := StakingPrivKeyFixture() + stakingPoP, err := crypto.BLSGeneratePOP(stakingSK) + if err != nil { + panic(err.Error()) + } + + return bootstrap.NewPublicNodeInfo( + identity.NodeID, + identity.Role, + identity.Address, + identity.InitialWeight, + identity.NetworkPubKey, + stakingSK.PublicKey(), + stakingPoP, + ) } func NodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.NodeInfo { @@ -1158,7 +1230,7 @@ func NodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.NodeInfo il := IdentityListFixture(n, opts...) nodeInfos := make([]bootstrap.NodeInfo, 0, n) for _, identity := range il { - nodeInfos = append(nodeInfos, bootstrap.NodeInfoFromIdentity(identity)) + nodeInfos = append(nodeInfos, NodeInfoFromIdentity(identity)) } return nodeInfos } @@ -1174,7 +1246,10 @@ func PrivateNodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.No func PrivateNodeInfosFromIdentityList(il flow.IdentityList) []bootstrap.NodeInfo { nodeInfos := make([]bootstrap.NodeInfo, 0, len(il)) for _, identity := range il { - nodeInfo := bootstrap.PrivateNodeInfoFromIdentity(identity, KeyFixture(crypto.ECDSAP256), KeyFixture(crypto.BLSBLS12381)) + nodeInfo, err := bootstrap.PrivateNodeInfoFromIdentity(identity, KeyFixture(crypto.ECDSAP256), KeyFixture(crypto.BLSBLS12381)) + if err != nil { + panic(err.Error()) + } nodeInfos = append(nodeInfos, nodeInfo) } return nodeInfos @@ -1698,37 +1773,49 @@ func WithChunkID(chunkID flow.Identifier) func(*verification.ChunkDataPackReques // Use options to customize the request. func ChunkDataPackRequestFixture(opts ...func(*verification.ChunkDataPackRequest)) *verification. ChunkDataPackRequest { - req := &verification.ChunkDataPackRequest{ Locator: chunks.Locator{ ResultID: IdentifierFixture(), Index: 0, }, - ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ - ChunkID: IdentifierFixture(), - Height: 0, - Agrees: IdentifierListFixture(1), - Disagrees: IdentifierListFixture(1), - }, + ChunkDataPackRequestInfo: *ChunkDataPackRequestInfoFixture(), } for _, opt := range opts { opt(req) } + // Ensure Targets reflects current Agrees and Disagrees + req.Targets = makeTargets(req.Agrees, req.Disagrees) + + return req +} + +func ChunkDataPackRequestInfoFixture() *verification.ChunkDataPackRequestInfo { + agrees := IdentifierListFixture(1) + disagrees := IdentifierListFixture(1) + + return &verification.ChunkDataPackRequestInfo{ + ChunkID: IdentifierFixture(), + Height: 0, + Agrees: agrees, + Disagrees: disagrees, + Targets: makeTargets(agrees, disagrees), + } +} + +// makeTargets returns a combined IdentityList for the given agrees and disagrees. +func makeTargets(agrees, disagrees flow.IdentifierList) flow.IdentityList { // creates identity fixtures for target ids as union of agrees and disagrees // TODO: remove this inner fixture once we have filter for identifier list. targets := flow.IdentityList{} - for _, id := range req.Agrees { - targets = append(targets, IdentityFixture(WithNodeID(id), WithRole(flow.RoleExecution))) + for _, id := range append(agrees, disagrees...) 
{ + targets = append(targets, IdentityFixture( + WithNodeID(id), + WithRole(flow.RoleExecution), + )) } - for _, id := range req.Disagrees { - targets = append(targets, IdentityFixture(WithNodeID(id), WithRole(flow.RoleExecution))) - } - - req.Targets = targets - - return req + return targets } func WithChunkDataPackCollection(collection *flow.Collection) func(*flow.ChunkDataPack) { @@ -2159,11 +2246,10 @@ func EpochRecoverFixture(opts ...func(setup *flow.EpochSetup)) *flow.EpochRecove WithClusterQCsFromAssignments(setup.Assignments), ) - ev := &flow.EpochRecover{ + return &flow.EpochRecover{ EpochSetup: *setup, EpochCommit: *commit, } - return ev } func IndexFixture() *flow.Index { @@ -2291,7 +2377,10 @@ func BootstrapFixtureWithChainID( if err != nil { panic(err) } - rootEpochState := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + rootEpochState, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + if err != nil { + panic(err) + } rootProtocolState, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, rootEpochState.ID()) if err != nil { panic(err) @@ -2450,9 +2539,9 @@ func DKGBroadcastMessageFixture() *messages.BroadcastDKGMessage { } } -// PrivateKeyFixture returns a random private key with specified signature algorithm and seed length -func PrivateKeyFixture(algo crypto.SigningAlgorithm, seedLength int) crypto.PrivateKey { - sk, err := crypto.GeneratePrivateKey(algo, SeedFixture(seedLength)) +// PrivateKeyFixture returns a random private key with specified signature algorithm +func PrivateKeyFixture(algo crypto.SigningAlgorithm) crypto.PrivateKey { + sk, err := crypto.GeneratePrivateKey(algo, SeedFixture(crypto.KeyGenSeedMinLen)) if err != nil { panic(err) } @@ -2466,8 +2555,9 @@ func PrivateKeyFixtureByIdentifier( seedLength int, id flow.Identifier, ) crypto.PrivateKey { - seed := append(id[:], id[:]...) 
- sk, err := crypto.GeneratePrivateKey(algo, seed[:seedLength]) + seed := make([]byte, seedLength) + copy(seed, id[:]) + sk, err := crypto.GeneratePrivateKey(algo, seed) if err != nil { panic(err) } @@ -2480,18 +2570,18 @@ func StakingPrivKeyByIdentifier(id flow.Identifier) crypto.PrivateKey { // NetworkingPrivKeyFixture returns a random ECDSAP256 private key func NetworkingPrivKeyFixture() crypto.PrivateKey { - return PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + return PrivateKeyFixture(crypto.ECDSAP256) } // StakingPrivKeyFixture returns a random BLS12381 private key func StakingPrivKeyFixture() crypto.PrivateKey { - return PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + return PrivateKeyFixture(crypto.BLSBLS12381) } func NodeMachineAccountInfoFixture() bootstrap.NodeMachineAccountInfo { return bootstrap.NodeMachineAccountInfo{ Address: RandomAddressFixture().String(), - EncodedPrivateKey: PrivateKeyFixture(crypto.ECDSAP256, DefaultSeedFixtureLength).Encode(), + EncodedPrivateKey: PrivateKeyFixture(crypto.ECDSAP256).Encode(), HashAlgorithm: bootstrap.DefaultMachineAccountHashAlgo, SigningAlgorithm: bootstrap.DefaultMachineAccountSignAlgo, KeyIndex: bootstrap.DefaultMachineAccountKeyIndex, @@ -2710,6 +2800,61 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio } } +func WithTxResultErrorMessageTxID(id flow.Identifier) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.TransactionID = id + } +} + +func WithTxResultErrorMessageIndex(index uint32) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.Index = index + } +} + +func WithTxResultErrorMessageTxMsg(message string) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.ErrorMessage = message + } +} + +func WithTxResultErrorMessageExecutorID(id flow.Identifier) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.ExecutorID = id + } +} + +// TransactionResultErrorMessageFixture creates a fixture tx result error message with randomly generated tx ID and executor ID for test purposes. +func TransactionResultErrorMessageFixture(opts ...func(*flow.TransactionResultErrorMessage)) flow.TransactionResultErrorMessage { + txResErrMsg := flow.TransactionResultErrorMessage{ + TransactionID: IdentifierFixture(), + Index: 0, + ErrorMessage: "transaction result error", + ExecutorID: IdentifierFixture(), + } + + for _, opt := range opts { + opt(&txResErrMsg) + } + + return txResErrMsg +} + +// TransactionResultErrorMessagesFixture creates a fixture collection of tx result error messages with n elements. +func TransactionResultErrorMessagesFixture(n int) []flow.TransactionResultErrorMessage { + txResErrMsgs := make([]flow.TransactionResultErrorMessage, 0, n) + executorID := IdentifierFixture() + + for i := 0; i < n; i++ { + txResErrMsgs = append(txResErrMsgs, TransactionResultErrorMessageFixture( + WithTxResultErrorMessageIndex(uint32(i)), + WithTxResultErrorMessageTxMsg(fmt.Sprintf("transaction result error %d", i)), + WithTxResultErrorMessageExecutorID(executorID), + )) + } + return txResErrMsgs +} + // RootEpochProtocolStateFixture creates a fixture with correctly structured Epoch sub-state. // The epoch substate is part of the overall protocol state (KV store).
// This can be useful for testing bootstrap when there is no previous epoch. @@ -2895,6 +3040,8 @@ func WithValidDKG() func(*flow.RichEpochStateEntry) { commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, KeyFixture(crypto.BLSBLS12381).PublicKey()) commit.DKGIndexMap[nodeID] = index } + // update CommitID according to new CurrentEpochCommit object + entry.MinEpochStateEntry.CurrentEpoch.CommitID = entry.CurrentEpochCommit.ID() } } diff --git a/utils/unittest/incorporated_results.go b/utils/unittest/incorporated_results.go index e61ebf1fb56..ade11768f88 100644 --- a/utils/unittest/incorporated_results.go +++ b/utils/unittest/incorporated_results.go @@ -7,9 +7,9 @@ var IncorporatedResult incorporatedResultFactory type incorporatedResultFactory struct{} func (f *incorporatedResultFactory) Fixture(opts ...func(*flow.IncorporatedResult)) *flow.IncorporatedResult { - result := ExecutionResultFixture() - incorporatedBlockID := IdentifierFixture() - ir := flow.NewIncorporatedResult(incorporatedBlockID, result) + ir, _ := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: IdentifierFixture(), + Result: ExecutionResultFixture()}) for _, apply := range opts { apply(ir) diff --git a/utils/unittest/locks.go b/utils/unittest/locks.go new file mode 100644 index 00000000000..8fbbc4d192d --- /dev/null +++ b/utils/unittest/locks.go @@ -0,0 +1,16 @@ +package unittest + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" +) + +func WithLock(t *testing.T, manager lockctx.Manager, lockID string, fn func(lctx lockctx.Context) error) { + t.Helper() + lctx := manager.NewContext() + require.NoError(t, lctx.AcquireLock(lockID)) + defer lctx.Release() + require.NoError(t, fn(lctx)) +} diff --git a/utils/unittest/protocol_state.go b/utils/unittest/protocol_state.go index 19f7fb4cb0c..ef4bafd0038 100644 --- a/utils/unittest/protocol_state.go +++ b/utils/unittest/protocol_state.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage/deferred" ) // FinalizedProtocolStateWithParticipants returns a protocol state with finalized participants @@ -86,7 +87,9 @@ func SealBlock(t *testing.T, st protocol.ParticipantState, mutableProtocolState block3 := BlockWithParentFixture(block2.Header) seals := []*flow.Seal{seal} - updatedStateId, dbUpdates, err := mutableProtocolState.EvolveState(block3.Header.ParentID, block3.Header.View, seals) + + dbUpdates := deferred.NewDeferredBlockPersist() + updatedStateId, err := mutableProtocolState.EvolveState(dbUpdates, block3.Header.ParentID, block3.Header.View, seals) require.NoError(t, err) require.False(t, dbUpdates.IsEmpty()) diff --git a/utils/unittest/unittest.go b/utils/unittest/unittest.go index 77970269c68..3176628ad83 100644 --- a/utils/unittest/unittest.go +++ b/utils/unittest/unittest.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/libp2p/go-libp2p/core/peer" "github.com/onflow/crypto" @@ -319,6 +319,16 @@ func RunWithTempDir(t testing.TB, f func(string)) { f(dbDir) } +func RunWithTempDirs(t testing.TB, f func(string, string)) { + dbDir := TempDir(t) + dbDir2 := TempDir(t) + defer func() { + require.NoError(t, os.RemoveAll(dbDir)) + require.NoError(t, os.RemoveAll(dbDir2)) + }() + f(dbDir, dbDir2) +} + func badgerDB(t testing.TB, 
dir string, create func(badger.Options) (*badger.DB, error)) *badger.DB { opts := badger. DefaultOptions(dir). @@ -387,7 +397,9 @@ func TempPebbleDBWithOpts(t testing.TB, opts *pebble.Options) (*pebble.DB, strin func RunWithPebbleDB(t testing.TB, f func(*pebble.DB)) { RunWithTempDir(t, func(dir string) { - db, err := pebble.Open(dir, &pebble.Options{}) + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) require.NoError(t, err) defer func() { assert.NoError(t, db.Close()) @@ -413,13 +425,17 @@ func RunWithBadgerDBAndPebbleDB(t testing.TB, f func(*badger.DB, *pebble.DB)) { } func PebbleDB(t testing.TB, dir string) *pebble.DB { - db, err := pebble.Open(dir, &pebble.Options{}) + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) require.NoError(t, err) return db } func TypedPebbleDB(t testing.TB, dir string, create func(string, *pebble.Options) (*pebble.DB, error)) *pebble.DB { - db, err := create(dir, &pebble.Options{}) + db, err := create(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) require.NoError(t, err) return db }
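For reviewers, a minimal usage sketch of the new `BlockWithParentAndUniqueView` helper introduced in `utils/unittest/fixtures.go`. This sketch is not part of the diff: the package and test names are hypothetical, and it assumes the pre-existing `BlockHeaderFixture` helper from `utils/unittest`.

```go
package example_test // hypothetical package name, for illustration only

import (
	"testing"

	"github.com/onflow/flow-go/utils/unittest"
)

// TestForksWithDistinctViews sketches how two conflicting forks can be built off the same
// parent without accidentally assigning the same view to both children.
func TestForksWithDistinctViews(t *testing.T) {
	parent := unittest.BlockHeaderFixture()

	// Views that must not be reused; the helper extends this set with every block it creates.
	forbiddenViews := map[uint64]struct{}{parent.View: {}}

	forkA := unittest.BlockWithParentAndUniqueView(parent, forbiddenViews)
	forkB := unittest.BlockWithParentAndUniqueView(parent, forbiddenViews)

	if forkA.Header.View == forkB.Header.View {
		t.Fatalf("expected distinct views, got %d for both forks", forkA.Header.View)
	}
}
```

Because each call records the generated view in `forbiddenViews`, building several forks against the same set yields pairwise-distinct views, or panics after the documented 20 failed attempts.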