diff --git a/.gitbook/assets/Screenshot 2023-07-14 102519.png b/.gitbook/assets/Screenshot 2023-07-14 102519.png
new file mode 100644
index 000000000..97ded2b69
Binary files /dev/null and b/.gitbook/assets/Screenshot 2023-07-14 102519.png differ
diff --git a/.gitbook/assets/home-icon.png b/.gitbook/assets/home-icon.png
new file mode 100644
index 000000000..d5fe25502
Binary files /dev/null and b/.gitbook/assets/home-icon.png differ
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index f48ad6b5c..000000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
-
-version: 2
-updates:
- - package-ecosystem: "npm" # See documentation for possible values
- directory: "/" # Location of package manifests
- schedule:
- interval: "daily"
- groups:
- dev-dependencies:
- patterns:
- - "*"
- ignore:
- - dependency-name: "@hyas/images"
- versions: ["3.x"]
- - package-ecosystem: "github-actions"
- directory: "/"
- schedule:
- interval: "weekly"
diff --git a/.github/workflows/check-external-links.yml b/.github/workflows/check-external-links.yml
deleted file mode 100644
index 05726e97e..000000000
--- a/.github/workflows/check-external-links.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Check external links
-
-on:
- pull_request:
- branches: [ main, staging, production ]
-
-jobs:
- linkChecker:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
-
- - name: Link Checker
- id: lychee
- uses: lycheeverse/lychee-action@v1.8.0
- with:
- args: --accept 200,429,403 '**/*.md'
- fail: true
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
deleted file mode 100644
index 2a2ccc743..000000000
--- a/.github/workflows/codeql-analysis.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-name: "CodeQL"
-
-on:
- push:
- branches: [master]
- pull_request:
- # The branches below must be a subset of the branches above
- branches: [master]
- schedule:
- - cron: '0 11 * * 5'
-
-jobs:
- analyze:
- name: Analyze
- runs-on: ubuntu-latest
-
- strategy:
- fail-fast: false
- matrix:
- # Override automatic language detection by changing the below list
- # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
- language: ['javascript']
- # Learn more...
- # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v2
- with:
- languages: ${{ matrix.language }}
- # If you wish to specify custom queries, you can do so here or in a config file.
- # By default, queries listed here will override any specified in a config file.
- # Prefix the list here with "+" to use these queries and those in the config file.
- # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
- # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
- # If this step fails, then you should remove it and run the build manually (see below)
- - name: Autobuild
- uses: github/codeql-action/autobuild@v2
-
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 https://git.io/JvXDl
-
- # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
- # and modify them (or add more) to build your code if your project
- # uses a compiled language
-
- #- run: |
- # make bootstrap
- # make release
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/node.js-ci.yml b/.github/workflows/node.js-ci.yml
deleted file mode 100644
index e9f33a434..000000000
--- a/.github/workflows/node.js-ci.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Hyas CI
-
-on:
- push:
- branches: master
- pull_request:
- branches: master
-
-jobs:
- build:
- runs-on: ${{ matrix.os }}
-
- strategy:
- matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
- node: [14.x, 16.x]
-
- steps:
- - name: Check out Hyas project
- uses: actions/checkout@v3
-
- - name: Set up Node.js ${{ matrix.node }}
- uses: actions/setup-node@v3
- with:
- node-version: ${{ matrix.node }}
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run Hyas test script
- run: npm test
-
- - name: Build production website
- run: npm run build
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..930deb06e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,13 @@
+---
+description: >-
+ This section aims to provide a comprehensive overview of Filecoin to
+ developers and serves as a reference that developers can check back on.
+---
+
+# What is Filecoin
+
+Filecoin is a peer-to-peer network that stores files, with built-in economic incentives and cryptography to ensure files are stored reliably over time. In Filecoin, users pay to store their files on storage providers. Storage providers are computers responsible for storing files and proving they have stored them correctly over time. Anyone who wants to store their files or get paid for storing other users’ files can join Filecoin. Available storage, and the price of that storage, are not controlled by any single company. Instead, Filecoin facilitates open markets for storing and retrieving files that anyone can participate in.
+
+Filecoin is built on top of the same software powering [IPFS protocol](https://docs.ipfs.tech/), which is a peer-to-peer distributed storage network that leverages [content addressing](https://docs.ipfs.tech/concepts/content-addressing/) to allow permanent references to the data, and avoids relying on specific devices or cloud servers for addressing the content. Filecoin is different from IPFS because it has an incentive layer on top to incentivize contents to be reliably stored and accessed.
+
+Filecoin enables several use cases, from Web3 native NFT and metaverse/game assets storage, incentivized permanent storage, to archiving Web2 datasets as a cheaper alternative to cloud storage. For example, [NFT.Storage](https://nft.storage/) utilizes Filecoin to provide a simple decentralized storage solution for NFT contents and metadata, while the [Shoah Foundation](https://sfi.usc.edu/) and the [Internet Archive](https://archive.org/) leverage Filecoin to back up their content. Filecoin also supports a wide range of formats of data, including audio and video files, allowing Web3 platforms such as [Audius](https://audius.co/) and [Huddle01](https://huddle01.com/) to leverage Filecoin as the decentralized storage backend for music streaming and video conferencing.
diff --git a/SUMMARY.md b/SUMMARY.md
new file mode 100644
index 000000000..1480b4ae7
--- /dev/null
+++ b/SUMMARY.md
@@ -0,0 +1,173 @@
+# Table of contents
+
+## Basics
+
+* [What is Filecoin](README.md)
+ * [Crypto-economics](basics/what-is-filecoin/crypto-economics.md)
+ * [Blockchain](basics/what-is-filecoin/blockchain.md)
+ * [Storage model](basics/what-is-filecoin/storage-model.md)
+ * [Storage market](basics/what-is-filecoin/storage-market.md)
+ * [Retrieval market](basics/what-is-filecoin/retrieval-market.md)
+ * [Programming on Filecoin](basics/what-is-filecoin/programming-on-filecoin.md)
+ * [Networks](basics/what-is-filecoin/networks.md)
+* [The blockchain](basics/the-blockchain/README.md)
+ * [Actors](basics/the-blockchain/actors.md)
+ * [Addresses](basics/the-blockchain/addresses.md)
+ * [Blocks and tipsets](basics/the-blockchain/blocks-and-tipsets.md)
+ * [Consensus](basics/the-blockchain/consensus.md)
+ * [Drand](basics/the-blockchain/drand.md)
+ * [Proofs](basics/the-blockchain/proofs.md)
+* [Assets](basics/assets/README.md)
+ * [The FIL token](basics/assets/the-fil-token.md)
+ * [Wallets](basics/assets/wallets.md)
+ * [Metamask setup](basics/assets/metamask-setup.md)
+ * [Get FIL](basics/assets/get-fil.md)
+ * [Transfer FIL](basics/assets/transfer-fil.md)
+* [Interplanetary consensus](basics/interplanetary-consensus/README.md)
+ * [Hierarchical consensus](basics/interplanetary-consensus/hierarchical-consensus.md)
+ * [IPC agent](basics/interplanetary-consensus/ipc-agent.md)
+ * [Mir and Trantor](basics/interplanetary-consensus/mir-and-trantor.md)
+* [How storage works](basics/how-storage-works/README.md)
+ * [Filecoin plus](basics/how-storage-works/filecoin-plus.md)
+ * [Storage onramps](basics/how-storage-works/storage-onramps.md)
+ * [Filecoin and IPFS](basics/how-storage-works/filecoin-and-ipfs.md)
+* [How retrieval works](basics/how-retrieval-works/README.md)
+ * [Basic retrieval](basics/how-retrieval-works/basic-retrieval.md)
+ * [Serving retrievals](basics/how-retrieval-works/serving-retrievals.md)
+ * [Saturn](basics/how-retrieval-works/saturn.md)
+* [Project and community](basics/project-and-community/README.md)
+ * [Chat and discussion forums](basics/project-and-community/chat-and-discussion-forums.md)
+ * [Filecoin compared to](basics/project-and-community/filecoin-compared-to.md)
+ * [Filecoin FAQs](basics/project-and-community/filecoin-faqs.md)
+ * [Related projects](basics/project-and-community/related-projects.md)
+ * [Social media](basics/project-and-community/social-media.md)
+ * [The Filecoin project](basics/project-and-community/the-filecoin-project.md)
+ * [Ways to contribute](basics/project-and-community/ways-to-contribute.md)
+
+## Storage providers
+
+* [Basics](storage-providers/basics/README.md)
+ * [Quickstart guide](storage-providers/basics/quickstart-guide.md)
+* [Filecoin economics](storage-providers/filecoin-economics/README.md)
+ * [Storage proving](storage-providers/filecoin-economics/storage-proving.md)
+ * [FIL collateral](storage-providers/filecoin-economics/fil-collateral.md)
+ * [Block rewards](storage-providers/filecoin-economics/block-rewards.md)
+ * [Slashing](storage-providers/filecoin-economics/slashing.md)
+ * [Committed capacity](storage-providers/filecoin-economics/committed-capacity.md)
+* [Filecoin deals](storage-providers/filecoin-deals/README.md)
+ * [Storage deals](storage-providers/filecoin-deals/storage-deals.md)
+ * [Verified deals](storage-providers/filecoin-deals/verified-deals.md)
+ * [Filecoin programs](storage-providers/filecoin-deals/filecoin-programs.md)
+ * [Snap deals](storage-providers/filecoin-deals/snap-deals.md)
+ * [Charging for data](storage-providers/filecoin-deals/charging-for-data.md)
+ * [Auxiliary services](storage-providers/filecoin-deals/auxiliary-services.md)
+ * [Return-on-investment](storage-providers/filecoin-deals/return-on-investment.md)
+* [Architecture](storage-providers/architecture/README.md)
+ * [Lotus components](storage-providers/architecture/lotus-components.md)
+ * [Storage provider automation](storage-providers/architecture/lotus-automation.md)
+ * [Sealing pipeline](storage-providers/architecture/sealing-pipeline.md)
+ * [Sealing rate](storage-providers/architecture/sealing-rate.md)
+ * [Sealing-as-a-service](storage-providers/architecture/sealing-as-a-service.md)
+ * [Network indexer](storage-providers/architecture/network-indexer.md)
+* [Infrastructure](storage-providers/infrastructure/README.md)
+ * [Storage](storage-providers/infrastructure/storage.md)
+ * [Network](storage-providers/infrastructure/network.md)
+ * [Backup and disaster recovery](storage-providers/infrastructure/backup-and-disaster-recovery.md)
+ * [Reference architectures](storage-providers/infrastructure/reference-architectures.md)
+* [Skills](storage-providers/skills/README.md)
+ * [Linux](storage-providers/skills/linux.md)
+ * [Network](storage-providers/skills/network.md)
+ * [Security](storage-providers/skills/security.md)
+ * [Storage](storage-providers/skills/storage.md)
+ * [Sales](storage-providers/skills/sales.md)
+ * [Industry](storage-providers/skills/industry.md)
+
+## Nodes
+
+* [Implementations](nodes/implementations/README.md)
+ * [Lotus](nodes/implementations/lotus.md)
+ * [Venus](nodes/implementations/venus.md)
+* [Full-nodes](nodes/full-nodes/README.md)
+ * [Pre-requisites](nodes/full-nodes/pre-requisites.md)
+ * [Basic setup](nodes/full-nodes/basic-setup.md)
+ * [Node providers](nodes/full-nodes/node-providers.md)
+* [Lite-nodes](nodes/lite-nodes/README.md)
+ * [Spin up a lite-node](nodes/lite-nodes/spin-up-a-lite-node.md)
+
+## Smart contracts
+
+* [Fundamentals](smart-contracts/fundamentals/README.md)
+ * [The FVM](smart-contracts/fundamentals/the-fvm.md)
+ * [Filecoin EVM runtime](smart-contracts/fundamentals/filecoin-evm-runtime.md)
+ * [ERC-20 quickstart](smart-contracts/fundamentals/erc-20-quickstart.md)
+ * [Roadmap](smart-contracts/fundamentals/roadmap.md)
+ * [Support](smart-contracts/fundamentals/support.md)
+ * [FAQs](smart-contracts/fundamentals/faqs.md)
+* [Filecoin EVM-runtime](smart-contracts/filecoin-evm-runtime/README.md)
+ * [Actor types](smart-contracts/filecoin-evm-runtime/actor-types.md)
+ * [Address types](smart-contracts/filecoin-evm-runtime/address-types.md)
+ * [FILForwarder](smart-contracts/filecoin-evm-runtime/filforwarder.md)
+ * [Difference with Ethereum](smart-contracts/filecoin-evm-runtime/difference-with-ethereum.md)
+ * [How gas works](smart-contracts/filecoin-evm-runtime/how-gas-works.md)
+ * [Precompiles](smart-contracts/filecoin-evm-runtime/precompiles.md)
+* [Developing contracts](smart-contracts/developing-contracts/README.md)
+ * [Get test tokens](smart-contracts/developing-contracts/get-test-tokens.md)
+ * [Remix](smart-contracts/developing-contracts/remix.md)
+ * [Hardhat](smart-contracts/developing-contracts/hardhat.md)
+ * [Foundry](smart-contracts/developing-contracts/foundry.md)
+ * [Solidity libraries](smart-contracts/developing-contracts/solidity-libraries.md)
+ * [Call built-in actors](smart-contracts/developing-contracts/call-built-in-actors.md)
+ * [Filecoin.sol](smart-contracts/developing-contracts/filecoin.sol.md)
+ * [Client contract tutorial](smart-contracts/developing-contracts/client-contract-tutorial.md)
+ * [Verify a contract](smart-contracts/developing-contracts/verify-a-contract.md)
+ * [Best practices](smart-contracts/developing-contracts/best-practices.md)
+* [Advanced](smart-contracts/advanced/README.md)
+ * [Wrapped FIL](smart-contracts/advanced/wrapped-fil.md)
+ * [Oracles](smart-contracts/advanced/oracles.md)
+ * [Cross-chain bridges](smart-contracts/advanced/cross-chain-bridges.md)
+
+## Networks
+
+* [Mainnet](networks/mainnet/README.md)
+ * [Explorers](networks/mainnet/explorers.md)
+ * [RPCs](networks/mainnet/rpcs.md)
+ * [Network performance](networks/mainnet/network-performance.md)
+* [Calibration](networks/calibration/README.md)
+ * [Explorers](networks/calibration/explorers.md)
+ * [RPCs](networks/calibration/rpcs.md)
+* [Spacenet](networks/spacenet/README.md)
+ * [RPCs](networks/spacenet/rpcs.md)
+ * [Get test tokens](networks/spacenet/get-test-tokens.md)
+* [Local testnet](networks/local-testnet/README.md)
+ * [Get test tokens](networks/local-testnet/get-test-tokens.md)
+
+## Reference
+
+* [General](reference/general/README.md)
+ * [Glossary](reference/general/glossary.md)
+ * [Specifications](reference/general/specifications.md)
+ * [Tools](reference/general/tools.md)
+* [Exchanges](reference/exchanges/README.md)
+ * [Exchange integration](reference/exchanges/exchange-integration.md)
+* [Built-in actors](reference/built-in-actors/README.md)
+ * [Protocol API](reference/built-in-actors/protocol-api.md)
+ * [Filecoin.sol](reference/built-in-actors/filecoin.sol.md)
+* [JSON-RPC](reference/json-rpc/README.md)
+ * [Auth](reference/json-rpc/auth.md)
+ * [Beacon](reference/json-rpc/beacon.md)
+ * [Chain](reference/json-rpc/chain.md)
+ * [Client](reference/json-rpc/client.md)
+ * [Create](reference/json-rpc/create.md)
+ * [Eth](reference/json-rpc/eth.md)
+ * [Gas](reference/json-rpc/gas.md)
+ * [I](reference/json-rpc/i.md)
+ * [Log](reference/json-rpc/log.md)
+ * [Market](reference/json-rpc/market.md)
+ * [Miner](reference/json-rpc/miner.md)
+ * [Mpool](reference/json-rpc/mpool.md)
+ * [Msig](reference/json-rpc/msig.md)
+ * [Net](reference/json-rpc/net.md)
+ * [Paych](reference/json-rpc/paych.md)
+ * [State](reference/json-rpc/state.md)
+ * [Sync](reference/json-rpc/sync.md)
+ * [Wallet](reference/json-rpc/wallet.md)
diff --git a/basics/assets/README.md b/basics/assets/README.md
new file mode 100644
index 000000000..b2d84dab0
--- /dev/null
+++ b/basics/assets/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+  This section covers the assets you can find on the Filecoin network, along with
+ how to securely manage and use them.
+---
+
+# Assets
+
diff --git a/basics/assets/get-fil.md b/basics/assets/get-fil.md
new file mode 100644
index 000000000..2478cb9aa
--- /dev/null
+++ b/basics/assets/get-fil.md
@@ -0,0 +1,39 @@
+---
+description: >-
+ The most common way to get FIL is to use an exchange. You should be aware of
+ some specific steps when trying to transfer FIL from an exchange to your
+ wallet.
+---
+
+# Get FIL
+
+## Exchanges
+
+A cryptocurrency exchange is a digital platform where users can buy, sell, and trade cryptocurrencies for other cryptocurrencies or traditional fiat currencies like USD, EUR, or JPY.
+
+Cryptocurrency exchanges provide a marketplace for users to trade their digital assets and are typically run by private companies that facilitate these transactions. These exchanges can differ in terms of fees, security protocols, and the variety of cryptocurrencies they support.
+
+Users can typically sign up for an account with a cryptocurrency exchange, deposit funds into their account, and then use those funds to purchase or sell cryptocurrencies at the current market price. Some exchanges offer advanced trading features like margin trading, stop-loss orders, and trading bots.
+
+It’s important to note that while cryptocurrency exchanges can offer convenience and liquidity for traders, they also come with risks like hacking and regulatory uncertainty. Therefore, users should take precautions to protect their funds and do their own research before using any particular exchange.
+
+### Supported exchanges
+
+There are many exchanges that allow users to buy, sell, and trade FIL. Websites like [coingecko.com](https://www.coingecko.com/) and [coinmarketcap.com](https://coinmarketcap.com/currencies/filecoin/markets/) keep track of which exchanges support which cryptocurrencies. You can use these lists to help decide which exchange to use.
+
+Once you have found an exchange you want to use, you will have to create an account with that exchange. Many exchanges have strict verification and Know-Your-Customer (KYC) processes in place, so it may take a few days to create your account. However, most large exchanges can verify your information in a few minutes.
+
+Purchasing cryptocurrency varies from exchange to exchange, but the process is usually something like this:
+
+1. Add funds to your exchange account in your local currency (USD, EUR, YEN, etc.).
+2. Exchange your local currency for FIL at a set price.
+
+### Address compatibility
+
+Some exchanges allow users to fund and withdraw FIL using any of the [Filecoin address types](../../smart-contracts/filecoin-evm-runtime/address-types.md). However, some exchanges only support one, or a handful, of the available address types. Most exchanges do not currently support [f410 addresses](../the-blockchain/addresses.md).
+
+If your exchange does not yet support Filecoin Eth-style 0x addresses, you must create a wallet to _relay_ the funds through. Take a look at the [Transfer FIL page](transfer-fil.md) for details on how to transfer your funds safely.
+
+## Test FIL
+
+If you’re looking to get FIL to test your applications on a testnet like [Calibration](../../networks/calibration/) then check how to get test tokens! Test FIL is often referred to as `tFIL`.
diff --git a/basics/assets/metamask-setup.md b/basics/assets/metamask-setup.md
new file mode 100644
index 000000000..abbb1e90a
--- /dev/null
+++ b/basics/assets/metamask-setup.md
@@ -0,0 +1,93 @@
+---
+description: >-
+ MetaMask is a popular browser extension that allows users to interact with
+ blockchain applications. This guide shows you how to configure MetaMask to
+ work with the Filecoin
+---
+
+# Metamask setup
+
+## Using Chainlist
+
+Chainlist is a website that lets users easily connect their wallets to EVM-compatible blockchains. Chainlist is managed by [DeFi Llama](https://defillama.com/). Chainlist is the simplest way to add the Filecoin network to your MetaMask wallet.
+
+{% tabs %}
+{% tab title="Mainnet" %}
+1. Navigate to [chainlist.network](https://chainlist.network).
+2. Search for `Filecoin Mainnet`.
+3. Click **Connect Wallet**.
+4. Click **Approve** when prompted to _Allow this site to add a network_.
+5. Click **Switch network** when prompted by MetaMask.
+6. Open MetaMask from the browser extensions tab.
+7. You should see _Filecoin_ listed at the top.
+
+You can now use MetaMask to interact with the Filecoin network.
+{% endtab %}
+
+{% tab title="Calibration" %}
+1. Navigate to [chainlist.network](https://chainlist.network).
+2. Search for `Filecoin Calibration`.
+3. Click **Connect Wallet**.
+4. Click **Approve** when prompted to _Allow this site to add a network_.
+5. You may be shown a warning that you are connecting to a test network. If prompted, click **Accept**.
+6. Click **Switch network** when prompted by MetaMask.
+7. Open MetaMask from the browser extensions tab. You should see _Filecoin Calibration_ listed at the top.
+
+You can now use MetaMask to interact with the Filecoin network.
+{% endtab %}
+
+{% tab title="Local testnet" %}
+1. Navigate to [chainlist.network](https://chainlist.network).
+2. Search for `Filecoin Local testnet`.
+3. Click **Connect Wallet**.
+4. Click **Approve** when prompted to _Allow this site to add a network_.
+5. You may be shown a warning that you are connecting to a test network. If prompted, click **Accept**.
+6. Click **Switch network** when prompted by MetaMask.
+7. Open MetaMask from the browser extensions tab. You should see _Filecoin Local testnet_ listed at the top.
+
+You can now use MetaMask to interact with the Filecoin network.
+{% endtab %}
+{% endtabs %}
+
+## Manual process
+
+If you can’t or don’t want to use Chainlist, you can add the Filecoin network to your MetaMask manually.
+
+### Prerequisites
+
+Before we get started, you’ll need the following:
+
+* A [Chromium-based browser](https://en.wikipedia.org/wiki/Chromium\_web\_browser#Browsers\_based\_on\_Chromium), or [Firefox](https://www.mozilla.org/en-CA/firefox/products/).
+* A browser with [MetaMask](https://metamask.io/) installed.
+
+### Steps
+
+The process for configuring MetaMask to use Filecoin is fairly simple, but has some very specific variables that you must copy exactly.
+
+1. Open your browser and open the MetaMask plugin. If you haven’t opened the MetaMask plugin before, you’ll be prompted to create a new wallet. Follow the prompts to create a wallet.
+2. Click the user circle and select **Settings.**
+3. Select **Networks**.
+4. Click **Add a network**.
+5. Scroll down and click **Add a network manually**.
+6. Enter the following information into the fields:
+
+{% tabs %}
+{% tab title="Mainnet" %}
+
+Field Value Network name Filecoin
+New RPC URL Either: - https://api.node.glif.io/rpc/v1
+- https://filecoin.chainup.net/rpc/v1
+- https://filecoin-mainnet.chainstacklabs.com/rpc/v1
+- https://infura.sftproject.io/filecoin/rpc/v1
+- https://rpc.ankr.com/filecoin
+Chain ID 314
+Currency symbol FIL
+{% endtab %}
+
+{% tab title="Calibration" %}
+Field Value Network name Filecoin Calibration testnet
+New RPC URL Either: - https://api.calibration.node.glif.io/rpc/v1
+- https://filecoin-calibration.chainup.net/rpc/v1
+Chain ID 314159
+Currency symbol tFIL
+{% endtab %}
+
+{% tab title="Local testnet" %}
+Field Value Network name Filecoin Local testnet
+New RPC URL http://localhost:1234/rpc/v1
+Chain ID 31415926
+Currency symbol tFIL
+{% endtab %}
+{% endtabs %}
+
+7. Pick a block explorer from the [Networks section](broken-reference/), and enter the URL into the **Block explorer (optional)** field.
+8. Review the values in the fields and click **Save**.
+9. The Filecoin network should now be shown in your MetaMask window.
+10. Done!
+
+You can now use MetaMask to interact with the Filecoin network.
diff --git a/basics/assets/the-fil-token.md b/basics/assets/the-fil-token.md
new file mode 100644
index 000000000..00b54cccb
--- /dev/null
+++ b/basics/assets/the-fil-token.md
@@ -0,0 +1,43 @@
+---
+description: >-
+ FIL is the cryptocurrency that powers the Filecoin network. This page explains
+  what FIL is, how it can be used, and its denominations.
+---
+
+# The FIL token
+
+## Uses
+
+FIL plays a vital role in incentivizing users to participate in the Filecoin network and ensuring its smooth operation. Here are some ways in which FIL is used on the Filecoin network:
+
+### Storage payments
+
+When a user wants to store data on the Filecoin network, they pay in FIL to the storage providers who offer their storage space. The payment is made in advance for a certain amount of time that the data will be stored on the network. The storage providers are then rewarded with FIL for providing their storage space and performing other useful tasks on the network.
+
+### Retrieval payments
+
+When a user wants to retrieve their data from the Filecoin network, they make a payment in FIL to the storage providers who stored the data. This payment incentivizes the storage providers to keep the data available for retrieval at all times.
+
+### Blockchain rewards
+
+FIL is used to reward providers who validate and add new blocks to the Filecoin blockchain. Providers receive a block reward in FIL for each new block they add to the blockchain and also earn transaction fees in FIL for processing storage and retrieval transactions.
+
+### Governance
+
+FIL is used for network governance, allowing FIL holders to vote on proposals and make decisions that impact the future development and direction of the network.
+
+## Denominations
+
+FIL, nanoFIL, and picoFIL are all denominations of the same cryptocurrency, but they represent different levels of precision and granularity. For most users, FIL is the main unit of measurement and is used for most transactions and payments on the Filecoin network.
+
+Much like how a US penny represents a fraction of a US dollar, there are many ways to represent value using Filecoin. This is because some actions on the Filecoin network require substantially less value than 1 whole `FIL`. The different denominations of `FIL` you may see referenced across the ecosystem are:
+
+| Name | Decimal |
+| -------- | ------------------------- |
+| FIL | 1 |
+| milliFIL | 1,000 |
+| microFIL | 1,000,000 |
+| nanoFIL | 1,000,000,000 |
+| picoFIL | 1,000,000,000,000 |
+| femtoFIL | 1,000,000,000,000,000 |
+| attoFIL | 1,000,000,000,000,000,000 |
diff --git a/basics/assets/transfer-fil.md b/basics/assets/transfer-fil.md
new file mode 100644
index 000000000..2d95cad33
--- /dev/null
+++ b/basics/assets/transfer-fil.md
@@ -0,0 +1,122 @@
+---
+description: >-
+ Due to the nature of Filecoin and Ethereum having different address types in
+ the Filecoin network, the process for transferring FIL between addresses can be
+ a bit nuanced.
+---
+
+# Transfer FIL
+
+After FVM launched, a new Ethereum-compatible address type (`f410` address) was introduced to the Filecoin network. This new `f410` address can be converted into an Ethereum-style address starting with `0x`, so that it can be used with any Ethereum-compatible tooling or dApp. In this tutorial, we use `f` address to refer to Filecoin-style addresses (which start with `f`) and `0x` address to refer to Ethereum-style addresses (which start with `0x`).
+
+There are four paths for transferring FIL tokens across the Filecoin network depending on which address type you are transferring from and to.
+
+| | From an `0x` address | From a `f` address |
+| ---------------------- | ------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- |
+| **To an `0x` address** | [`0x` => `0x` address](https://docs.filecoin.io/basics/assets/transfer-fil/#eth-style-address-to-eth-style-address) | [`f` =>`0x` address](https://docs.filecoin.io/basics/assets/transfer-fil/#filecoin-to-eth-style-address) |
+| **To a `f` address** | [`0x` => `f` address](https://docs.filecoin.io/basics/assets/transfer-fil/#eth-style-address-to-filecoin) | [`f` => `f` address](https://docs.filecoin.io/basics/assets/transfer-fil/#filecoin-to-filecoin) |
+
+{% hint style="warning" %}
+**ASSETS ON THE FILECOIN NETWORK ARE NOT AVAILABLE ON ANY OTHER NETWORK.**\
+\
+Remember that Filecoin is fully compatible with Ethereum tools, like wallets. But that doesn’t mean you’re using the Ethereum network. These instructions transfer assets only within the Filecoin network. [Learn how to configure your Ethereum wallet on the Filecoin network](https://docs.filecoin.io/basics/assets/metamask-setup/).
+{% endhint %}
+
+## 0x => 0x address
+
+If you want to transfer FIL tokens from one `f4` address to another `f4` address using their corresponding `0x` addresses, you need to understand how to convert between `f4` and `0x` address.
+
+* If you have an `f4` address, you can convert it to a `0x` address using the [Beryx Address converter](https://beryx.zondax.ch/address\_converter).
+* If you have a `0x` address, you can directly search for it on [Filfox Explorer](https://filfox.info/en), which will show the `0x` address and its corresponding `f4` address.
+
+Apart from that, you just need to follow the standard process using your preferred Ethereum-compatible wallet, like MetaMask, MethWallet, etc. For instance, [MetaMask has a simple guide](https://support.metamask.io/hc/en-us/articles/360015488931-How-to-send-tokens-from-your-MetaMask-wallet) for how to send Ethereum from one account to another.
+
+## 0x => f address
+
+If you want to transfer FIL tokens from an Ethereum-style `0x` address to another Filecoin address type, like an `f1` or `f3` address, follow the steps in the [FilForwarder](https://docs.filecoin.io/smart-contracts/filecoin-evm-runtime/filforwarder/) tutorial.
+
+## f => 0x address
+
+Most wallets and exchanges currently support Filecoin `f1` or `f3` addresses, and many of them already fully support `f4` and `0x` addresses, including [OKX](https://www.okx.com/markets/prices/filecoin-fil), [Kraken](https://www.kraken.com/), [Btcturk](https://www.btcturk.com/), etc. However, some exchanges are still implementing support for `f4` addresses. If your preferred wallet or exchange won’t let you directly transfer FIL to an `f4` or Ethereum-style `0x` address, we recommend filing a support issue with the exchange to help accelerate the support of `f4` addresses.
+
+The process for sending FIL from a Filecoin `f` address to an Ethereum-style `0x` address depends on the wallet or exchange you use.
+
+### Ledger device
+
+Currently, Ledger Live does not yet support `0x` or `f4` addresses, so you cannot directly use it to send FIL tokens to a `0x` or `f4` address. However, you can connect your Ledger device to the [Glif.io](https://www.glif.io/s) wallet and transfer FIL from a Filecoin `f1/f3` address to an Ethereum-style `0x` address. This method is more secure than the [Hot wallet](https://docs.filecoin.io/basics/assets/transfer-fil/#hot-wallet) method detailed below since your private keys never leave your Ledger device.
+
+In this method, you will connect your Ledger device to the [Glif.io](https://www.glif.io/) website and send FIL from your `f` address on the Ledger to an Ethereum-style `0x` address.
+
+1. Ensure your Ledger device is connected to your computer, then log in to the Ledger Live dashboard and update your Ledger device’s Filecoin app to version `0.22.9`.\
+
+
+ 
+2. Make sure Filecoin app is open on your Ledger wallet. Your Ledger should display **Filecoin ready**.
+3. Go to [Glif](https://glif.io) and click **Connect Wallet**.\
+
+
+ 
+4. Select **Ledger (Filecoin)** and unlock your Ledger device, selecting the Filecoin application.\
+
+
+ 
+5. Once connected, you should see the details of your Filecoin account stored on your Ledger. Click **Send FIL**.\
+
+
+ 
+6. Enter the `0x` address you wish to send to. Glif will automatically convert the `0x` address into an `f4` address.\
+
+
+ 
+7. Enter the amount of FIL you want to send. Click **Send**.
+8. Verify the information is correct and accept the transaction on your hardware device.
+9. The transferred FIL will show up at the Eth-style `0x` address once this transaction is finalized on-chain which will take 60 - 90 seconds.
+10. You can check the status of this transfer by clicking the transaction ID link.\
+
+
+ 
+
+You can also follow this [Guide: How to transfer FIL from Ledger to MetaMask (0x)](https://blog.filecointldr.io/guide-how-to-transfer-fil-from-ledger-to-metamask-0x-9760f869b28e).
+
+### Hot wallet
+
+A hot wallet is a cryptocurrency wallet that is always connected to the internet. They allow you to store, send, and receive tokens. Because hot wallets are always connected to the internet, they tend to be somewhat more vulnerable to hacks and theft than cold storage methods. However, they are generally easier to use than cold wallets and do not require any specific hardware like a Ledger device.
+
+If you want to transfer your FIL tokens from an `f1/f3` address to a `0x` address, but the wallet or exchange you are using does not support `f4` and `0x` style addresses, you can create a _burner wallet_ using Glif, transfer FIL to the burner wallet, and then transfer FIL from the burner wallet to the `0x` address on MetaMask.
+
+1. Navigate to [https://wallet.glif.io/](https://wallet.glif.io/). Create a **Burner wallets**.\
+
+
+ 
+2. Click **Create Seed Phrase**. Write down your seed phrase somewhere safe. You can also copy or download the seed phrase. You will need it later.\
+
+
+ 
+3. Click **I’ve recorded my seed phrase**. Using your seed phrase, enter the missing words in the blank text fields.
+4. Click **Next**, and then **Connect**. The burner wallet is created.
+5. In the upper left corner of your wallet dashboard, click on the double squares icon next to your address to copy it. Record this address. You will need it later.\
+
+
+ 
+6. From your main wallet account or exchange, transfer your FIL token to this address.
+7. Connect to MetaMask, copy your `0x` address.
+8. Once the funds appear in the burner wallet, click on **Send FIL**.\
+
+
+ 
+9. Enter the necessary information into the text fields:
+
+ * In the **Recipient** field, enter your `0x` style address. GLIF automatically converts it to an `f4` address.
+ * In the **Amount** field, enter the amount of FIL to send. Make sure you have enough FIL to cover the GAS cost.\
+
+
+ 
+10. Click **Send**. The FIL will arrive in your MetaMask wallet shortly.
+
+### Exchange
+
+If you are transferring FIL from any exchange to your `0x` address on MetaMask, make sure the exchange supports withdrawing FIL to a `0x` or `f410` address. If not, you will need extra steps to withdraw FIL tokens to your `0x` address. Let’s take Coinbase as an example; you can follow this [Guide: How to transfer FIL from Coinbase to a Metamask Wallet (0x)](https://filecointldr.io/article/guide-how-to-transfer-fil-from-coinbase-to-a-metamask-wallet-0x).
+
+## f => f address
+
+There are no special steps or requirements for sending Filecoin from one Filecoin-style address to another on the Filecoin network.
diff --git a/basics/assets/wallets.md b/basics/assets/wallets.md
new file mode 100644
index 000000000..2a624e23c
--- /dev/null
+++ b/basics/assets/wallets.md
@@ -0,0 +1,48 @@
+---
+description: >-
+ Wallets provide a way to securely store Filecoin, along with other digital
+ assets. These wallets consist of a public and private key, which work similar
+ to a bank account number and password.
+---
+
+# Wallets
+
+When someone sends cryptocurrency to your wallet address, the transaction is recorded on the blockchain network, and the funds are added to your wallet balance. Similarly, when you send cryptocurrency from your wallet to someone else’s wallet, the transaction is recorded on the blockchain network, and the funds are deducted from your wallet balance.
+
+There are various types of cryptocurrency wallets, including desktop, mobile, hardware, and web-based wallets, each with its own unique features and levels of security. It’s important to choose a reputable and secure wallet to ensure the safety of your digital assets.
+
+## Compatible wallets
+
+We do not provide technical support for any of these wallets. Please use caution when researching and using the wallets listed below. Wallets that have conducted third-party audits of their open-source code by a reputable security auditor are marked _recommended_ below.
+
+If you are already running your own lotus node, you can also [manage FIL wallets from the command line](https://lotus.filecoin.io/lotus/manage/manage-fil/).
+
+| Name | Description | Audited |
+| ---------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------- |
+| [Ledger](https://support.ledger.com/hc/en-us/articles/4402721277329-Filecoin-FIL?support=true) | A multi-currency hardware wallet. _Recommended._ | Yes |
+| [Glif web wallet](https://wallet.glif.io) | Supports sending & receiving FIL. Can be integrated with a Ledger hardware device. _Recommended._ | Yes |
+| [Trust wallet](https://trustwallet.com/) | A multi-currency wallet, official wallet of Binance. | Unknown |
+| [ImToken](https://token.im/) | A multi-currency wallet. | Unknown |
+| [MathWallet](https://mathwallet.org/en-us/) | A multi-currency wallet. | Unknown |
+| [FoxWallet](https://foxwallet.com/) | A multi-currency mobile wallet by [Filfox](https://filfox.info/en). | Yes |
+| FilSnap MetaMask plugin | MetaMask has a plugin system called [Snaps](https://github.com/MetaMask/metamask-snaps-beta/wiki), currently in beta. | No |
+
+### Hot versus cold
+
+A hot wallet refers to any wallet that is permanently connected to the internet. They can be mobile, desktop, or browser-based. Hot wallets make it faster and easier to access digital assets but could be vulnerable to online attacks. Therefore, it is recommended to keep large balances in cold wallets and only use hot wallets to hold funds that need to be accessed frequently.
+
+Cold wallets most commonly refer to hardware wallet devices shaped like a USB stick. They are typically offline and only connected to the internet for transactions. Accessing a cold wallet typically requires physical possession of the device plus knowledge of the private key, which makes them more resistant to theft. Cold wallets can be less convenient and are most useful for storing larger balances securely.
+
+### Security
+
+Wallets that have gone through an audit have had their codebase checked by a recognized security firm for security vulnerabilities and potential leaks. However, just because a wallet has had an audit does not mean that it’s 100% bug-proof. Be incredibly cautious when using unaudited wallets.
+
+Never share your seed phrase, password, or private keys. Bad actors will often use social engineering tactics such as phishing emails or posing as customer service or tech support to lure users into handing over their private key or seed phrase.
+
+### Add a wallet to our list
+
+If you know of a wallet that supports Filecoin, you can submit a pull request to this page and add it!
+
+* Create an issue in [`filecoin-project/filecoin-docs`](https://github.com/filecoin-project/filecoin-docs) with the name of the wallet and its features.
+* If the wallet is a mobile wallet, it must be available on both Android and iOS.
+* The wallet must have been audited. The results of this audit must be public.
diff --git a/basics/how-retrieval-works/README.md b/basics/how-retrieval-works/README.md
new file mode 100644
index 000000000..2894312b2
--- /dev/null
+++ b/basics/how-retrieval-works/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers the very basics of how retrieving data works on the
+ Filecoin network.
+---
+
+# How retrieval works
+
diff --git a/basics/how-retrieval-works/basic-retrieval.md b/basics/how-retrieval-works/basic-retrieval.md
new file mode 100644
index 000000000..29b133550
--- /dev/null
+++ b/basics/how-retrieval-works/basic-retrieval.md
@@ -0,0 +1,150 @@
+---
+description: >-
+  There are multiple ways to fetch data from a storage provider. This page
+  covers some of the most popular methods.
+---
+
+# Basic retrieval
+
+### Lassie
+
+Lassie is a simple retrieval client for IPFS and Filecoin. It finds and fetches your data over the best retrieval protocols available. Lassie makes Filecoin retrieval easy. While Lassie is powerful, the core functionality is expressed in a single CLI command:
+
+```shell
+lassie fetch <CID>
+```
+
+Lassie also provides an HTTP interface for retrieving IPLD data from IPFS and Filecoin peers. Developers can use this interface directly in their applications to retrieve the data.
+
+Lassie fetches content in content-addressed archive (CAR) form, so in most cases you will need additional tooling to deal with CAR files. Lassie can also be used as a library to fetch data from Filecoin from within your application. Due to the diversity of data transport protocols in the IPFS ecosystem, Lassie is able to use the Graphsync or Bitswap protocols, depending on how the requested data is available to be fetched. One prominent use case of Lassie as a library is the **Saturn Network**. Saturn nodes fetch content from Filecoin and IPFS through Lassie in order to serve retrievals.
+
+
+
+#### Retrieve using Lassie
+
+Make sure that you have [Go](https://go.dev/) installed and that your `GOPATH` is set up. By default, your `GOPATH` will be set to `~/go`.\
+**Install Lassie** [**#**](https://docs.filecoin.io/basics/how-retrieval-works/basic-retrieval/#install-lassie)
+
+1. Download the [Lassie Binary from the latest release](https://github.com/filecoin-project/lassie/releases/latest) based on your system architecture.
+
+ Or download and install Lassie using the Go package manager:
+
+```
+go install github.com/filecoin-project/lassie/cmd/lassie@latest
+```
+
+2. Download the [go-car binary from the latest release](https://github.com/ipld/go-car/releases/latest) based on your system architecture, or install the [go-car](https://github.com/ipld/go-car) package using the Go package manager. The go-car package makes it easier to work with content-addressed archive (CAR) files:
+
+```
+go install github.com/ipld/go-car/cmd/car@latest
+```
+
+You now have everything you need to retrieve a file with Lassie and extract the contents with `go-car`.
+
+**Retrieve**
+
+To retrieve data from Filecoin using Lassie, all you need is the CID of the content you want to download.
+
+The video below demonstrates how Lassie can be used to render content directly from Filecoin and IPFS.
+
+Lassie and `go-car` can work together to retrieve and extract data from Filecoin. All you need is the CID of the content to download.
+
+```shell
+lassie fetch -o - <CID> | car extract
+```
+
+This command uses a `|` to chain two commands together. This will work on Linux or macOS. Windows users may need to use PowerShell to use this form. Alternatively, you can use the commands separately as explained later in this page.
+
+An example of fetching and extracting a single file, identified by its CID:
+
+```shell
+lassie fetch -o - bafykbzaceatihez66rzmzuvfx5nqqik73hlphem3dvagmixmay3arvqd66ng6 | car extract - > lidar-data.tar
+```
+
+Basic progress information, similar to the output shown below, is displayed:
+
+```plaintext
+Fetching bafykbzaceatihez66rzmzuvfx5nqqik73hlphem3dvagmixmay3arvqd66ng6................................................................................................................................................
+Fetched [bafykbzaceatihez66rzmzuvfx5nqqik73hlphem3dvagmixmay3arvqd66ng6] from [12D3KooWPNbkEgjdBNeaCGpsgCrPRETe4uBZf1ShFXStobdN18ys]:
+ Duration: 42.259908785s
+ Blocks: 144
+ Bytes: 143 MiB
+extracted 1 file(s)
+```
+
+The resulting file is a tar archive:
+
+```shell
+ls -l
+# total 143M
+# -rw-rw-r-- 1 user user 143M Feb 16 11:21 lidar-data.tar
+```
+
+**Lassie CLI usage**
+
+Lassie usage for retrieving data is:
+
+```shell
+lassie fetch -p -o <outfile> <CID>/path/to/content
+```
+
+* `-p` is an optional flag that tells Lassie that you would like to see detailed progress information as it fetches your data.
+
+ For example:
+* ```plaintext
+ Fetching bafykbzaceatihez66rzmzuvfx5nqqik73hlphem3dvagmixmay3arvqd66ng6
+ Querying indexer for bafykbzaceatihez66rzmzuvfx5nqqik73hlphem3dvagmixmay3arvqd66ng6...
+ Found 4 storage providers candidates from the indexer, querying all of them:
+ 12D3KooWPNbkEgjdBNeaCGpsgCrPRETe4uBZf1ShFXStobdN18ys
+ 12D3KooWNHwmwNRkMEP6VqDCpjSZkqripoJgN7eWruvXXqC2kG9f
+ 12D3KooWKGCcFVSAUXxe7YP62wiwsBvpCmMomnNauJCA67XbmHYj
+ 12D3KooWLDf6KCzeMv16qPRaJsTLKJ5fR523h65iaYSRNfrQy7eU
+ Querying [12D3KooWLDf6KCzeMv16qPRaJsTLKJ5fR523h65iaYSRNfrQy7eU] (started)...
+ Querying [12D3KooWKGCcFVSAUXxe7YP62wiwsBvpCmMomnNauJCA67XbmHYj] (started)...
+
+ ...
+ ```
+* `-o` is an optional flag that tells Lassie where to write the output to. If you don’t specify a file, it will append `.car` to your CID and use that as the output file name.
+
+If you specify `-`, as in our above example, the output will be written to `stdout` so it can be piped to another command, such as `go-car`, or redirected to a file.
+
+* `<CID>/path/to/content` is the CID of the content you want to retrieve, and an optional path to a specific file within that content. Example:
+* ```shell
+ lassie fetch -o - bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze/wiki/Cryptographic_hash_function | car extract - | less
+ ```
+
+A CID is always necessary and, if you don’t specify a path, Lassie will attempt to download the entire content. If you specify a path, Lassie will only download that specific file or, if it is a directory, the entire directory and its contents.
+
+**go-car CLI usage**
+
+The `car extract` command can be used to extract files and directories from a CAR:
+
+```shell
+car extract -f <file.car> [/path/to/file/or/directory] [<output>]
+```
+
+* `-f` is an optional flag that tells `go-car` where to read the input from. If omitted, it will read from `stdin`, as in our example above where we piped `lassie fetch -o -` output to `car extract`.
+* `/path/to/file/or/directory` is an optional path to a specific file or directory within the CAR. If omitted, it will attempt to extract the entire CAR.
+* `<output>` is an optional argument that tells `go-car` where to write the output to. If omitted, it will write to the current directory.
+
+If you supply `-`, as in the above example, it will attempt to extract the content directly to `stdout`. This will only work if we are extracting a single file.
+
+In the example above where we fetched a file named `lidar-data.tar`, the `>` operator was used to redirect the output of `car extract` to a named file. This is because the content we fetched was raw file data that did not have a name encoded. In this case, if we didn’t use `-` and `> filename`, `go-car` would write to a file named `unknown`. In this instance `go-car` was used to reconstitute the file from the raw blocks contained within Lassie’s CAR output.
+
+`go-car` has other useful commands. The first is `car ls`, which can be used to list the contents of a CAR. The second is `car inspect`, which can be used to inspect the contents of the CAR and optionally verify the integrity of a CAR.
+
+And there we have it! Downloading and managing data from Filecoin is super simple when you use Lassie and Go-car!
+
+#### Lassie HTTP daemon
+
+The Lassie HTTP daemon is an HTTP interface for retrieving IPLD data from IPFS and Filecoin peers. It fetches content from peers known to have it, and provides the resulting data in CAR format.
+
+```shell
+GET /ipfs/{cid}[/path][?params]
+```
+
+A `GET` query against a Lassie HTTP daemon allows retrieval from peers that have the content identified by the given root CID, streaming the DAG in the response in [CAR (v1)](https://ipld.io/specs/transport/car/carv1/) format. You can read more about the HTTP request and response to the daemon in [Lassie’s HTTP spec](https://github.com/filecoin-project/lassie/blob/main/docs/HTTP\_SPEC.md). Lassie’s HTTP interface can be a very powerful tool for web applications which require fetching data from Filecoin and IPFS.
+
+#### Lassie’s CAR format
+
+Lassie only returns data in CAR format; specifically, [CARv1](https://ipld.io/specs/transport/car/carv1/) format. [Lassie’s car spec](https://github.com/filecoin-project/lassie/blob/main/docs/CAR.md) describes the nature of the CAR data returned by Lassie and the various options available to the client for manipulating the output.
diff --git a/basics/how-retrieval-works/saturn.md b/basics/how-retrieval-works/saturn.md
new file mode 100644
index 000000000..0b2de4132
--- /dev/null
+++ b/basics/how-retrieval-works/saturn.md
@@ -0,0 +1,19 @@
+---
+description: >-
+ Filecoin Saturn is an open-source, community-run Content Delivery Network
+ (CDN) built on Filecoin.
+---
+
+# Saturn
+
+https://saturn.tech/
+
+Saturn is a Web3 CDN in Filecoin’s retrieval market. On one side of the network, websites buy fast, low-cost content delivery. On the other side, Saturn node operators earn Filecoin by fulfilling requests.
+
+Saturn is trustless, permissionless, and inclusive. Anyone can run Saturn software, contribute to the network, and earn Filecoin.
+
+Content on Saturn is IPFS content-addressed. Every piece of content is immutable and every response verifiable.
+
+Incentives unite, align, and grow the network. Node operators earn Filecoin for accelerating web content and websites get faster content delivery for less.
+
+Find out more over at [saturn.tech](https://saturn.tech).
diff --git a/basics/how-retrieval-works/serving-retrievals.md b/basics/how-retrieval-works/serving-retrievals.md
new file mode 100644
index 000000000..447bfc8de
--- /dev/null
+++ b/basics/how-retrieval-works/serving-retrievals.md
@@ -0,0 +1,36 @@
+---
+description: >-
+ In this article, we will discuss the functions of storage providers in the
+ Filecoin network, the role of the indexer, and the retrieval process for
+ publicly available data.
+---
+
+# Serving retrievals
+
+### The indexer
+
+When a storage deal is originally made, the client can opt to make the data publicly discoverable. If this is the case, the storage provider must publish an advertisement of the storage deal to the Interplanetary Network Indexer (IPNI). IPNI maps a CID to a storage provider (SP). This mapping allows clients to query the IPNI to discover where content is on Filecoin.
+
+The IPNI also tracks which data transfer protocols you can use to retrieve specific CIDs. Currently, Filecoin SPs have the ability to serve retrievals over Graphsync, Bitswap, and HTTP. This is dependent on the SP setup.
+
+### Retrieval process
+
+If a client wants to retrieve publicly available data from the Filecoin network, then they generally follow this process.
+
+#### Query the IPNI
+
+Before the client can submit a retrieval deal to a storage provider, they first need to find which providers hold the data. To do this, the client sends a query to the Interplanetary Network Indexer.
+
+#### Select a provider
+
+Assuming the IPNI returns more than one storage provider, the client can select which provider they’d like to deal with. Here, they will also get additional details (if needed) based on the retrieval protocol they want to retrieve the content over.
+
+#### Initiate retrieval
+
+The client then attempts to retrieve the data from the SP over Bitswap, Graphsync, or HTTP. Note that currently, clients can only get full-piece retrievals using HTTP.
+
+When attempting this retrieval deal using Graphsync, payment channels are used to pay FIL to the storage provider. These payment channels watch the data flow and pay the storage provider after each chunk of data is retrieved successfully.
+
+#### Finalize the retrieval
+
+Once the client has received the last chunk of data, the connection is closed.
diff --git a/basics/how-storage-works/README.md b/basics/how-storage-works/README.md
new file mode 100644
index 000000000..fad8709d3
--- /dev/null
+++ b/basics/how-storage-works/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+  This section covers the very basics of how storing data works on the
+  Filecoin network.
+---
+
+# How storage works
+
diff --git a/basics/how-storage-works/filecoin-and-ipfs.md b/basics/how-storage-works/filecoin-and-ipfs.md
new file mode 100644
index 000000000..4dde3eea6
--- /dev/null
+++ b/basics/how-storage-works/filecoin-and-ipfs.md
@@ -0,0 +1,74 @@
+---
+description: >-
+ Explore the features that make Filecoin a compelling system for storing files.
+ This is an overview of features offered by Filecoin that make it a compelling
+ system for storing files.
+---
+
+# Filecoin and IPFS
+
+#### Verifiable storage
+
+Filecoin has built-in processes to check the history of files and verify that they have been stored correctly over time. Every storage provider proves that they are maintaining their files in every 24 hour window. Clients can efficiently scan this history to confirm that their files have been stored correctly, even if the client was offline at the time. Any observer can check any storage provider’s track record and will notice if the provider has been faulty or offline in the past.
+
+[Learn about storage verification at ProtoSchool](https://proto.school/#/verifying-storage-on-filecoin)
+
+#### Open market
+
+In Filecoin, file storage and retrieval deals are negotiated in open markets. Anybody can join the Filecoin network without needing permission. Running a storage provider only requires an internet connection and spare disk space. By lowering the barriers to entry, Filecoin enables a thriving ecosystem of many independent storage providers.
+
+#### Competitive prices
+
+Prices for storage and retrieval are determined by supply and demand, not corporate pricing departments. Filecoin makes reliable storage available at hyper-competitive prices. Miners compete based on their storage, reliability, and speed rather than through marketing or locking users in.
+
+#### Reliable storage
+
+Because storage is paid for, Filecoin provides a viable economic reason for files to stay available over time. Files are stored on computers that are reliable and well-connected to the internet.
+
+#### Reputation, not marketing
+
+In Filecoin, storage providers prove their reliability through their track record published on the blockchain, not through marketing claims published by the providers themselves. Users don’t need to rely on status pages or self-reported statistics from storage providers.
+
+#### Choice of tradeoffs
+
+Users get to choose their own tradeoffs between cost, redundancy, and speed. Users are not limited to a set group of data centers offered by their provider but can choose to store their files on any storage provider participating in Filecoin.
+
+#### Censorship resistance
+
+Filecoin resists censorship because no central provider can be coerced into deleting files or withholding service. The network is made up of many different computers run by many different people and organizations. Faulty or malicious actors are noticed by the network and removed automatically.
+
+#### Useful blockchain
+
+In Filecoin, storage providers are rewarded for providing storage, not for performing wasteful computations. Filecoin secures its blockchain using proof of file replication and proof of storage over time. It doesn’t rely on energy-intensive proof-of-work schemes like other blockchains. Miners are incentivized to amass hard drives and put them to use by storing files. Filecoin doesn’t incentivize hoarding of graphics cards or application-specific integrated circuits for the sole purpose of mining.
+
+#### Provides storage to other blockchains
+
+Filecoin’s blockchain is designed to store large files, whereas other blockchains can typically only store tiny amounts of data, very expensively. Filecoin can provide storage to other blockchains, allowing them to store large files. In the future, mechanisms will be added to Filecoin, enabling Filecoin’s blockchain to interoperate with transactions on other blockchains.
+
+#### Content addressing
+
+Files are referred to by the data they contain, not by fragile identifiers such as URLs. Files remain available no matter where they are hosted or who they are hosted by. When a file becomes popular, it can be quickly distributed by swarms of computers instead of relying on a central computer, which can become overloaded by network traffic.
+
+When multiple users store the same file (and choose to make the file public by not encrypting it), everyone who wants to download the file benefits from Filecoin keeping it available. No matter where a file is downloaded from, users can verify that they have received the correct file and that it is intact.
+
+#### Content distribution network
+
+Retrieval providers are computers that have good network connections to lots of users who want to download files. By prefetching popular files and distributing them to nearby users, retrieval providers are rewarded for making network traffic flow smoothly, and files download quickly.
+
+#### Single protocol
+
+Applications implementing Filecoin can store their data on any storage provider using the same protocol. There isn’t a different API to implement for each provider. Applications wishing to support several different providers aren’t limited to the lowest-common-denominator set of features supported by all their providers.
+
+#### No lock-in
+
+Migrating to a different storage provider is made easier because they all offer the same services and APIs. Users aren’t locked into providers because they rely on a particular feature of the provider. Also, files are content-addressed, enabling them to be transferred directly between providers without the user having to download and re-upload the files.
+
+Traditional cloud storage providers lock users by making it cheap to store files but expensive to retrieve them again. Filecoin avoids this by facilitating a retrieval market where providers compete to give users their files back as fast as possible, at the lowest possible price.
+
+#### Open source code
+
+The code that runs both clients and storage providers is open-source. Storage providers don’t have to develop their own software for managing their infrastructure. Everyone benefits from improvements made to Filecoin’s code.
+
+#### Active community
+
+Filecoin has an active community of contributors to answer questions and help newcomers get started. There is an open dialog between users, developers, and storage providers. If you need help, you can reach the person who designed or built the system in question. Reach out on [Filecoin’s chat and forums](https://docs.filecoin.io/basics/project-and-community/chat-and-discussion-forums/).
diff --git a/basics/how-storage-works/filecoin-plus.md b/basics/how-storage-works/filecoin-plus.md
new file mode 100644
index 000000000..5be641eac
--- /dev/null
+++ b/basics/how-storage-works/filecoin-plus.md
@@ -0,0 +1,149 @@
+---
+description: >-
+ Notaries, clients and storage providers interact through the allocation and
+ spending of DataCap. Filecoin Plus increases the effectiveness of the network.
+---
+
+# Filecoin plus
+
+### Concepts
+
+Filecoin Plus is based on a set of guiding principles, [detailed in Filecoin Improvement Proposal (FIP) 0003](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0003.md), that focus the program on increasing Filecoin’s effectiveness at becoming the decentralized storage network for humanity’s most important information.
+
+Root key-holders, notaries, clients, and storage providers, interact through the allocation and spending of DataCap. Notaries retrieve DataCap in batches and then allocate it to trustworthy clients that spend the DataCap to fund storage deals. Storage providers that receive DataCap receive a 10x boost to their quality-adjusted power for the storage space offered in that deal, which increases their block reward share in the network. This creates a mechanism that incentivizes all participants to make Filecoin more useful.
+
+#### DataCap
+
+DataCap, when allocated to a client, can be spent by the client in storage deals with storage providers. Those deals carry a higher deal quality multiplier, which increases the “quality adjusted power” of the storage provider on the network by a factor of 10, yielding better block rewards for the storage provider over time. DataCap is granted in batches to Notaries, who can allocate it to clients that spend the DataCap to fund storage deals. DataCap is consumed as it is used to make deals.
+
+#### Notary
+
+Notaries are selected to serve as fiduciaries for the Filecoin Network and are responsible for allocating DataCap to clients with valuable storage use cases. The base responsibilities of notaries include:
+
+* Allocate DataCap responsibly to clients to subsidize reliable and valuable storage on the network.
+* Ensure that in the allocation of the DataCap, no party is given excessive trust in any form that might jeopardize the network.
+* Follow operational guidelines, keep a record of decision flow, and respond to any requests for audits of their allocation decisions.
+
+You can find a list of current [active notaries at plus.fil.org](https://plus.fil.org).
+
+Notaries are selected through an [application process](https://github.com/filecoin-project/notary-governance/tree/main/notaries#application--selection-process). If approved, [root key-holders](https://github.com/filecoin-project/notary-governance/tree/main/root-key-holders#overview) (executors of the decisions made by the community on-chain) grant notary status and DataCap amounts. Those interested in becoming Notaries should apply for this role by filing an Issue in the [notary governance repository](https://github.com/filecoin-project/notary-governance/).
+
+#### Storage client
+
+Clients can use _DataCap_ to incentivize storage providers to serve their needs. This can include providing additional features and levels of services that meet their specific requirements. In doing so, storage-related goods and services on Filecoin are made more valuable and competitive over time. Notaries vet clients to ensure the client receives DataCap commensurate with their reputation and needs and that the client responsibly allocates that DataCap.
+
+**Smart contracts**
+
+Smart contracts can acquire and use DataCap just like any regular client. To do so, simply enter the `f410` address of the smart contract as the client address when making a request for DataCap.
+
+For a smart contract’s first DataCap allocation, we recommend using [verify.glif.io](https://verify.glif.io) to get 32 GiB of DataCap easily, as outlined below.
+
+It’s important to note that DataCap allocations are a one-time credit for a Filecoin address and cannot be transferred between smart contracts. If you need to redeploy the smart contract, you must request additional DataCap. To improve this experience, we are developing a Filecoin request for comments (FRC) to allow DataCap to be held between redeployments.
+
+### Using DataCap
+
+#### Get DataCap
+
+Clients are required to have an on-chain Filecoin address where DataCap can be received. If you are setting up a new address, make sure to initialize it by sending a minimal amount of FIL to the address by purchasing some FIL from an exchange, for example. You will need an address to proceed with getting DataCap in any of the following ways.
+
+_Note: As of network version 12, DataCap allocations are a single-use credit on a Filecoin address. If you receive an allocation and require more, you should make a new request with a unique address that you have initialized like above._ [_Filecoin Improvement Proposal (FIP) 0012_](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0012.md) _was accepted and implemented in network version 13 (actors v5), which allows client addresses to receive DataCap multiple times._
+
+Clients get DataCap by making a request to a notary. For your first DataCap allocation of 32GiB, you can use an auto-verifier such as [Verifier](https://verify.glif.io/). Auto-verifiers exist to grant DataCap immediately to clients that can authenticate themselves via a specific method. For example, the verify.glif.io automatic notary grants DataCap to clients who have a GitHub account that is > 180 days old and has not been used at this site in the past 30 days.
+
+1. Head over to [Verifier](https://verify.glif.io/).
+2. Connect your GitHub account - click the **Start** button on the top right of the page
+3. Sign in to GitHub if you have not already
+4. Paste in the address to which you’d like to receive DataCap in the box under “Request” and hit **Request**
+5. The auto-notary will now attempt to send a message to the Filecoin Network whereby your address will be granted 32GiB of DataCap. This will take about 1 min
+6. When complete, you now have 32GiB to start making deals with! You can always come back to this site and use the “Check” box to see how much DataCap you have left on a specific address
+
+For receiving DataCap at a larger scale (for business needs and production use cases), depending on the amount of data being onboarded to the network, a client has two options:
+
+* Applying directly to a specific notary - best for clients looking for < 100TiB of DataCap
+* Applying for a Large Dataset notary dedicated to a specific project - best for clients looking for > 100TiB of DataCap (usually in the 500TiB-5PiB range)
+
+For applying directly to a specific notary:
+
+1. Head over to the [Fil+ Registry](https://plus.fil.org/), and proceed with **For Clients**
+2. Click **Get Verified**
+3. Click on **General Verification**. This link will take you to the notary registry, where you can request DataCap to a specific notary. Notaries may specialize in the types of requests they’ll choose to support. It is recommended that you select a notary in your region of operation that also covers the general category of _Use Case_ you would classify yourself under
+4. Identify the notary you would like to apply to by selecting the checkbox on their row, and click **Make Request**
+5. Fill out the form that should pop up. This is used by notaries in conducting any necessary due diligence before granting you the requested DataCap
+6. Click **Sign in with GitHub** to allow the app to create a GitHub issue on your behalf
+7. After you are signed in, the button should change to **Send request**. Click this to have an issue created on your behalf and sent to the right notary!
+
+Each request is tracked as a GitHub issue in the [Fil+ Client onboarding repository](https://github.com/filecoin-project/filecoin-plus-client-onboarding). You can follow the progress of your application there as well. Notaries may ask for additional information before they will allocate you DataCap.
+
+To apply for a Large Dataset Notary, follow the steps at [Applying for a large DataCap allocation](https://github.com/filecoin-project/filecoin-plus-large-datasets#applying-for-a-large-datacap-allocation).
+
+#### Spend DataCap
+
+Once you have an address with DataCap, you can make deals using DataCap as a part of the payment. Because storage providers receive a deal quality multiplier for taking Fil+ deals, many storage providers offer special pricing and services to attract clients who use DataCap to make deals.
+
+By default, when you make a deal with an address with DataCap allocated, you will spend DataCap when making the deal.
+
+If making deals through the [API](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-v0-methods.md#ClientStartDeal), make sure that the `VerifiedDeal` parameter is set to `true` when calling `ClientStartDeal`.
+
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+If making deals from the command line, make sure to pass the flag `--verified-deal=true` as a parameter.
+
+```shell
+ lotus client deal --verified-deal=true
+```
+
+#### Checking remaining DataCap balance
+
+Once you have received DataCap to an address, you can check the remaining balance either by visiting a site that enables this (e.g. [verify.glif.io](https://verify.glif.io/)) or by querying your address on a node.
+
+**With lotus v1.10.0 ^**
+
+```shell
+lotus filplus check-client-datacap f00000
+```
+
+**With lotus v1.9.0 and below**
+
+_Note:_ [_Lotus-shed_](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-shed) _is a separate package you will need to build and install (`make lotus-shed` in the_ [_Lotus_](https://github.com/filecoin-project/lotus) _source), although these features are slated to be merged into Lotus._
+
+```shell
+lotus-shed verifreg check-client f00000
+```
+
+#### Finding storage providers to take Fil+ deals
+
+There are a few different ways in which a client can find a storage provider to take a Fil+ storage deal:
+
+1. In the **For Clients** section of the [Fil+ Registry](https://filplus.fil.org/), there is a [Miner Registry](https://filplus.fil.org/#/miners) which lists a self-selected set of storage providers who are willing to take Fil+ storage deals
+2. Use a Miner Reputation system such as [Filecoin Reputation System](http://filrep.io/) or Textile’s [Miner Index](https://docs.textile.io/filecoin/miner-index/) to identify storage providers who can meet your needs
+3. Join the [#fil-plus](https://filecoinproject.slack.com/archives/C01DLAPKDGX) channel on Filecoin Slack to discuss storage options
+4. Hop into the network with your node and query storage providers (using `query-ask`) to check their verified deal prices
+
+### Get involved in Fil+ governance
+
+If you are interested in participating in governance and shaping the program, here is how you can get involved:
+
+* Join the [#fil-plus](https://filecoinproject.slack.com/archives/C01DLAPKDGX) channel on Filecoin Slack.
+* Participate in FIL notary community governance calls, which happen every other Tuesday. Use the [Filecoin Community Events Calendar](https://calendar.google.com/calendar/u/1?cid=Y19rMWdrZm9vbTE3ZzBqOGM2YmFtNnVmNDNqMEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t). to join or watch for updates in #fil-plus
+* Create and comment on open issues in the [notary governance repository](https://github.com/filecoin-project/notary-governance/issues).
diff --git a/basics/how-storage-works/storage-onramps.md b/basics/how-storage-works/storage-onramps.md
new file mode 100644
index 000000000..196a710ca
--- /dev/null
+++ b/basics/how-storage-works/storage-onramps.md
@@ -0,0 +1,27 @@
+---
+description: >-
+ Storage on-ramps and helpers are APIs and services that abstract Filecoin
+ dealmaking into simple, streamlined API calls.
+---
+
+# Storage onramps
+
+Here’s how they work: Developers use APIs or libraries to send data to storage helpers. Behind the scenes, storage helpers receive the data and handle the underlying processes to store it in a reliable and decentralized way, by saving it to [IPFS](https://ipfs.tech) nodes, making deals with Filecoin storage providers – or both. You can use the same APIs or other tools to retrieve data quickly.
+
+Storage helpers are available for NFTs (non-fungible tokens) or general data. If you are storing NFTs, check out [Storing NFTs](storage-onramps.md#storing-nfts). For general data, skip to [General data storage](storage-onramps.md#general-data-storage).
+
+#### Storing NFTs
+
+* [NFT.Storage](https://nft.storage/) offers free, long-term storage for your NFT metadata and assets. It uses CIDs (content identifiers) so you can create truly immutable NFTs and avoid situations where files are accidentally or deliberately deleted (also known as “rug pulls”). Data uploads of up to 31 GiB per file are possible. NFT.Storage has a JavaScript library, HTTP API, and browser-based uploader. There are also clients in Go, Java, PHP, Python, Ruby, and Rust automatically generated via OpenAPI.
+
+#### General data storage
+
+* [Chainsafe Storage API](https://docs.storage.chainsafe.io) is an underlayer to Chainsafe’s encrypted IPFS & Filecoin file storage system. It offers S3-compatible bucket style APIs for easy migration of data. As of September 2022, it’s the only storage helper with built-in encryption.
+* [Estuary](https://docs.estuary.tech) is a free, decentralized data storage service for IPFS and Filecoin. Users can store and retrieve content quickly, and have their data backed up with proposition receipts and successful deal receipts. Estuary’s API adheres to the IPFS Pinning Services API Spec. You can use Estuary via its HTTP API or rclone tool for syncing. Note: Estuary is in alpha mode. Users wanting to store meaningful public data can apply for an API token.
+* [Web3.Storage](https://web3.storage/docs) is a fast and open developer platform for [storing and interacting with data](https://blog.web3.storage/posts/say-hello-to-the-data-layer-1-3-intro-to-web3-storage). Upload any data, and Web3.Storage will ensure it ends up on a decentralized set of IPFS and Filecoin storage providers. There are JavaScript and Go libraries for the API, as well as a no-code web uploader. Free and paid plans are available.
+
+#### Advanced tools
+
+The following tools offer more customization and configuration options. You can choose specific individual storage providers, customize pricing, and more. Note that you may have to manage storage deals individually, including designing your own redundancy plans, keeping track of expiring deals, and renewing them and more.
+
+* [Textile Powergate](https://docs.textile.io/powergate/) combines IPFS and Filecoin nodes directly, and offers advanced configuration options such as miner selection, replication factor, deal renewal, and repair. It includes JavaScript and Go libraries, and administrative APIs to create and manage users.
diff --git a/basics/interplanetary-consensus/README.md b/basics/interplanetary-consensus/README.md
new file mode 100644
index 000000000..200a89092
--- /dev/null
+++ b/basics/interplanetary-consensus/README.md
@@ -0,0 +1,53 @@
+---
+description: >-
+  Interplanetary Consensus (IPC) is a framework that enables on-demand
+  horizontal scalability of Filecoin by deploying subnets running different
+  consensus algorithms depending on the application.
+---
+
+# Interplanetary consensus
+
+Many blockchain networks, including Filecoin, require that all validators process all transactions. This creates a bottleneck and makes it challenging to increase network performance by scaling out. On top of that, different applications have different performance and security requirements, making it difficult for a single consensus layer to accommodate the needs of all Web3 applications.
+
+Interplanetary Consensus (IPC) is a framework that enables on-demand, horizontal scalability of Filecoin by deploying subnets that:
+
+* Spawn their own state.
+* Validate messages in parallel.
+* Interact with any other subnet in the hierarchy, as well as with the Filecoin root network.
+
+Subnets can run different consensus algorithms depending on application requirements, and are particularly well suited to scenarios requiring fast finality.
+
+{% embed url="https://www.youtube.com/watch?v=aRyj9kOvW7I" %}
+Check out this short introduction from [ConsensusLab](https://consensuslab.world/)
+{% endembed %}
+
+### Use cases
+
+The introduction of IPC enables the following use cases on the Filecoin network:
+
+* **Computation**: Spawn ephemeral subnets to run distributed computation jobs.
+* **Coordination**: Assemble into smaller subnets for decentralized orchestration with high throughput and low fees.
+* **Localization**: Leverage proximity to improve performance and operate with very low latency in geographically constrained settings.
+* **Partition tolerance**: Deploy blockchain substrates in mobile settings or other environments with limited connectivity.
+
+### Components
+
+The IPC project is split into two main components, which we briefly describe here:
+
+* The hierarchical consensus framework, consisting of the base protocol, the actors, the ipc-agent, and the Eudico node.
+* The Trantor consensus algorithm, implemented atop the Mir framework.
+
+### Public testnet
+
+[Spacenet](../../networks/spacenet/) is an early builder testnet for Interplanetary Consensus, Mir, and Trantor. It provides high throughput and low latency with FEVM support.
+
+### Support
+
+The [IPC Agent repository](https://github.com/consensus-shipyard/ipc-agent) features extensive documentation on how to get started with IPC. We recommend that you start your journey there.
+
+If you have questions, ideas, or wish to get involved, join the [Filecoin Slack workspace](https://filecoin.io/slack/) and meet us in the following channels:
+
+* `#ipc-help`, for any questions and as a general entry point to the world of IPC.
+* `#ipc-announcements`, for relevant announcements related to the software and network.
+* `#ipc-dev`, for development discussions.
+* `#ipc-docs`, for documentation discussions.
diff --git a/basics/interplanetary-consensus/hierarchical-consensus.md b/basics/interplanetary-consensus/hierarchical-consensus.md
new file mode 100644
index 000000000..9b7ed89e2
--- /dev/null
+++ b/basics/interplanetary-consensus/hierarchical-consensus.md
@@ -0,0 +1,37 @@
+---
+description: >-
+ IPC subnets are organized as a tree and periodically checkpoint their state
+ onto their parent to leverage its security.
+---
+
+# Hierarchical consensus
+
+### Overview
+
+In IPC, subnets are organized in a hierarchy, with each subnet having its own consensus instance, cryptoeconomic rules, and agreement algorithm. This design increases the network’s capacity and accommodates new applications with varying consensus requirements.
+
+Subnets periodically save their state by checkpointing it with their parent network. They are also able to interact with other subnets via cross-net transactions. IPC subnets can resemble other L2 platforms, such as [optimistic rollups](https://ethereum.org/en/developers/docs/scaling/optimistic-rollups/), [ZK rollups](https://ethereum.org/en/developers/docs/scaling/zk-rollups/), or a sidechain with a native communication bridge.
+
+A complete overview of the architecture can be found in the [IPC Design Reference](https://github.com/consensus-shipyard/IPC-design-reference-spec/raw/main/main.pdf).
+
+### IPC Agent
+
+The IPC Agent is the entry point to interacting with IPC. It is a client application that provides a simple and easy-to-use interface to interact with IPC as a user and run all the processes required to operate a subnet. The agent acts as an orchestrator, connecting to one blockchain node in each relevant subnet. It also handles the entire IPC workflow, including subnet creation, cross-net message passing, and checkpointing.
+
+More information on the IPC Agent, as well as comprehensive documentation, can be found on [GitHub](https://github.com/consensus-shipyard/ipc-agent).
+
+### IPC Actors
+
+IPC relies on two actors, the _IPC Subnet Actor (ISA)_ and the _IPC Gateway Actor (IGA)_, which are instantiated in each subnet and provide convenience and governance functions.
+
+The IGA is an actor that contains all IPC-related information and logic associated with a subnet that needs to be replicated within the subnet. The ISA is the IGA’s parent-side counterpart; that is, it is deployed to a subnet’s parent and contains all data and logic associated with the particular child subnet.
+
+The [actors](https://github.com/consensus-shipyard/ipc-actors) are currently implemented in Rust and provided as built-in WASM actors in [Spacenet](../../networks/spacenet/). We are currently working on a Solidity implementation.
+
+### Eudico
+
+Eudico is a modularized implementation of [Lotus](../../nodes/implementations/lotus.md), itself an implementation of the Filecoin Distributed Storage Network. It is designed with the flexibility to support different use cases and, particularly relevant to IPC, the ability to load different consensus protocols.
+
+In our architecture, Eudico is used as the blockchain node. A separate instance of Eudico is run for each subnet in which a given node participates.
+
+The Eudico node is available on [GitHub](https://github.com/consensus-shipyard/lotus).
diff --git a/basics/interplanetary-consensus/ipc-agent.md b/basics/interplanetary-consensus/ipc-agent.md
new file mode 100644
index 000000000..241b20070
--- /dev/null
+++ b/basics/interplanetary-consensus/ipc-agent.md
@@ -0,0 +1,618 @@
+---
+description: >-
+ The IPC Agent is the entrypoint to interacting with IPC. It is a client
+ application that provides a simple and easy-to-use interface to interact with
+ IPC
+---
+
+# IPC agent
+
+The IPC Agent is the entry point to interacting with IPC. It is a client application that provides a simple and easy-to-use interface to interact with IPC as a user and run all the processes required for the operation of a subnet. See the [GitHub docs](https://github.com/consensus-shipyard/ipc-agent/tree/main/docs) for a conceptual overview.
+
+### Installation
+
+#### Prerequisites
+
+To build the IPC Agent you need to have Rust installed in your environment. The current MSRV (Minimum Supported Rust Version) is nightly-2022-10-03 due to some test build dependencies. A working version is tracked in rust-toolchain (this is picked up by rustup automatically). You can look for instructions on [how to run Rust and rustup following this link](https://www.rust-lang.org/tools/install).
+
+{% hint style="info" %}
+️According to the operating system you are running, you may have to install additional dependencies not installed in your system to follow these instructions like `build-essentials`, `libssl-dev`, `git`, `curl`. If something fails while building the binaries double-check these dependencies.
+{% endhint %}
+
+#### Build instructions
+
+To build the binary for the IPC agent you need to install the requirements in your environment, clone this repository, and build the binary following these steps:
+
+```shell
+git clone https://github.com/consensus-shipyard/ipc-agent.git
+cd ipc-agent
+rustup target add wasm32-unknown-unknown
+make build
+```
+
+This builds the binary of the IPC agent in the `./bin` folder of your repository. If you want to make the command available everywhere, add this folder to the binary `PATH` of your system. To see if the installation was successful you can run the following command:
+
+```shell
+./bin/ipc-agent --help
+```
+
+### Infrastructure
+
+IPC currently uses [a fork of Lotus](https://github.com/consensus-shipyard/lotus), that we like to call _Eudico_, to run its subnets. The IPC agent does nothing by itself, and is just an orchestrator over existing subnet deployments. To ease the deployment of new subnets and nodes, we provide a set of convenient scripts to deploy all the infrastructure required to run IPC.
+
+#### Install infrastructure scripts
+
+[Eudico](https://github.com/consensus-shipyard/lotus/tree/spacenet/scripts/ipc) provides a set of infrastructure scripts, which assume a working installation of Docker. To install Docker [follow this link](https://docs.docker.com/get-docker/) and choose your working environment.
+
+{% hint style="info" %}
+Some users have reported issues trying to build the required images using Docker Desktop, if this is the case, try installing a version of [Docker engine](https://docs.docker.com/engine/install/#server) supported by your system.
+{% endhint %}
+
+With Docker installed, you can then `make install-infra` in the root of the `ipc-agent`. This make rule will clone the Eudico repository, build the docker image that you need to run subnets, and install the infrastructure scripts in the `./bin` folder. In Unix-based systems, it is highly recommended to include your user inside the `docker` group to avoid having to run many of the commands from this tutorial using `sudo`. You can achieve this running the following commands:
+
+```shell
+sudo usermod -aG docker $USER
+newgrp docker
+```
+
+To check if the installation of the image and all infra-related scripts was successful you can run the following command, and it should return a similar output:
+
+```shell
+docker images | grep eudico
+# eudico latest 8fb6db609712 2 minutes ago 341MB
+```
+
+### Usage
+
+#### Configuration
+
+If you are running the agent for the first time, the first thing you need to do is to create a new config. The default config path for the agent is `~/.ipc-agent/config.toml`. The agent will always try to pick up the config from this path unless told otherwise. To populate a sample config file in the default path, you can run the following command:
+
+```shell
+./bin/ipc-agent config init
+```
+
+If you `cat ~/.ipc-agent/config.toml` you should see a new config populated with a sample root and subnet configurations.
+
+#### Running the daemon
+
+The IPC agent runs as a foreground daemon process that spawns a new JSON RPC server to interact with it, and all the processes to automatically handle checkpoints and the execution of cross-net messages for the subnets our agent is participating in. The agent determines the list of subnets it should interact with from its config file.
+
+Alternatively, the agent can also be used as a CLI to interact with IPC. Under the hood, this CLI sends new commands to the RPC server of the daemon. To run the IPC agent daemon you can run:
+
+```shell
+./bin/ipc-agent daemon
+```
+
+Running the agent at this point will throw an error, because we haven’t configured it to interact with any IPC network. In the next few sections we will walk you through different alternatives to spawn and connect your agent to a running IPC instance.
+
+The RPC server of the daemon will be listening to the endpoint determined in the `json_rpc_address` field of the config. If you are looking for your agent to be accessible from Docker or externally, remember to listen from `0.0.0.0` instead of `127.0.0.1` as specified in the empty configuration.
+
+### Interacting with a rootnet
+
+#### Spacenet
+
+For more information about the Spacenet testnet have a look at the [Spacenet repository](https://github.com/consensus-shipyard/spacenet). In this section we will guide you through how to connect your IPC agent to a running instance of Spacenet. Spacenet hosts all the IPC actors and can be used as a rootnet to deploy new subnets from.
+
+In order to use the IPC agent with Spacenet we need to have access to a full-node syncing with the network. The easiest way to achieve this is to run your own Spacenet node. Running your own Spacenet node is as simple as [installing the dependencies](https://github.com/consensus-shipyard/lotus#basic-build-instructions), cloning the Eudico repository, and compiling and running it:
+
+```shell
+git clone https://github.com/consensus-shipyard/lotus
+cd lotus
+```
+
+The `spacenet` branch is the main branch in the repository. To find the latest release deployed over Spacenet, you can check the [last release](https://github.com/consensus-shipyard/lotus/releases) published in the repository and check out that tag:
+
+```shell
+git checkout <release-tag>
+```
+
+Compile Eudico for Spacenet:
+
+```shell
+make spacenet
+```
+
+Run your node:
+
+```shell
+./eudico mir daemon --bootstrap=true
+```
+
+With this, your node should automatically connect to the bootstraps of the network and start syncing the latest state of the chain.
+
+{% hint style="info" %}
+More information and further details about the operation of Spacenet can be found in the [Spacenet repository](https://github.com/consensus-shipyard/spacenet).
+{% endhint %}
+
+With the node running, you are ready to connect the IPC agent to Spacenet. For this, you’ll need to get an authentication token for your node, and point to the RPC API of the node (by default running on port `1234`).
+
+```shell
+# Generate auth token to node
+./eudico auth create-token --perm admin
+
+eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.8vIV7pCrWx-nxOBAAw_IayDzrGf22kMjagRYmj_8Qqw
+```
+
+Additionally, you should create a new wallet address (if you don’t have one already) to use for your IPC interactions. You can create a new wallet by running the following command in your Eudico node:
+
+```shell
+# Create new wallet
+./eudico wallet new
+
+t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq
+```
+
+With all this information, the config of the agent should be updated to connect to the peer and start interacting with Spacenet’s IPC by editing the following section for the `/root`:
+
+```toml
+[[subnets]]
+id = "/root"
+gateway_addr = "t064"
+network_name = "root"
+jsonrpc_api_http = "http://127.0.0.1:1234/rpc/v1"
+auth_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.8vIV7pCrWx-nxOBAAw_IayDzrGf22kMjagRYmj_8Qqw"
+accounts = ["t1xbevqterae2tanmh2kaqksnoacflrv6w2dflq4i"]
+```
+
+You can now start your IPC agent daemon with `./bin/ipc-agent daemon`.
+
+{% hint style="warning" %}
+In the current implementation of Spacenet, the gateway is always deployed in the `t064` address. This should be the address always reflected on your config for the gateway. In the future, this will change, and the gateway may be deployed in different addresses.
+{% endhint %}
+
+To check if the agent has been connected to Spacenet successfully you can try creating a new wallet in the network, but this time through the agent by running:
+
+```shell
+./bin/ipc-agent wallet new --key-type=bls
+
+2023-03-30T12:01:11Z INFO ipc_agent::cli::commands::manager::wallet] created new wallet with address WalletNewResponse { address: "t1om5pijjq5dqic4ccnqqrvv6zgzwrlxf6bh2apvi" } in subnet "/root"
+```
+
+Finally, to be able to interact with Spacenet and run new subnets, some FIL should be provided to, at least, the wallet that will be used by the agent to interact with IPC. You can request some Spacenet FIL for your address through the [Spacenet Faucet](https://spacenet.consensus.ninja/).
+
+#### Local deployment
+
+To deploy sample rootnet locally for testing you can use the IPC scripts installed in `./bin/ipc-infra` by running:
+
+```shell
+./bin/ipc-infra/run-root-docker-1val.sh
+```
+
+For instance, running `./bin/ipc-infra/run-root-docker-1val.sh 1235 1379` will run a rootnet daemon listening at `localhost:1235`, and a single validator mining in the rootnet listening through its libp2p host in `localhost:1379`. The end of the log in the execution of this script should look something like:
+
+```plaintext
+>>> Root daemon running in container: 84711d67cf162e30747c4525d69728c4dea8c6b4b35cd89f6d0947fee14bf908
+>>> Token to /root daemon: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.j94YYOr8_AWhGGHQd0q8JuQVuNhJA017SK9EUkqDOO0
+>>> Default wallet: t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq
+```
+
+This information will be relevant to configure our agent to connect to this rootnet node. For this, the `config.toml` section should be updated accordingly. In the above case, we need to set the endpoint of our rootnet node to be `127.0.0.1:1235`, we need to set `auth_token` to the one provided by the script, and the default account, for instance, the one provided by the script (although we could use any other).
+
+The configuration for our rootnet should look therefore like this:
+
+```toml
+[[subnets]]
+id = "/root"
+gateway_addr = "t064"
+network_name = "root"
+jsonrpc_api_http = "http://127.0.0.1:1235/rpc/v1"
+auth_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.j94YYOr8_AWhGGHQd0q8JuQVuNhJA017SK9EUkqDOO0"
+accounts = ["t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq"]
+```
+
+{% hint style="warning" %}
+If you are already running the daemon, changes in the config file are only picked up after running `./bin/ipc-agent config reload` so be sure to run it after editing your config.
+{% endhint %}
+
+Finally, to test if the connection to the rootnet has been successful, we can for instance try to create a new wallet in the rootnet:
+
+```shell
+./bin/ipc-agent wallet new --key-type=bls
+```
+
+### Running a subnet
+
+To spawn a new subnet, our IPC agent should be connected to at least the subnet of the parent we want to spawn the subnet from. You can refer to the previous section for information on how to run or connect to a rootnet. These instructions will assume the deployment of a subnet from `/root`, but the steps are equivalent for any other parent subnet.
+
+#### Spawn subnet actor
+
+To run a subnet the first thing is to configure and create the subnet actor that will govern the subnet’s operation:
+
+```shell
+./bin/ipc-agent subnet create -p -n --min-validator-stake 1 --min-validators --bottomup-check-period --topdown-check-period
+
+# Sample command execution
+./bin/ipc-agent subnet create -p /root -n test --min-validator-stake 1 \
+--min-validators 0 --bottomup-check-period 10 --topdown-check-period 10
+
+[2023-03-21T09:32:58Z INFO ipc_agent::cli::commands::manager::create] created subnet actor with id: /root/t01002
+```
+
+This command deploys a subnet actor for a new subnet from the `root`, with a human-readable name `test`, that requires at least `1` validator to join the subnet to be able to mine new blocks, and with a checkpointing period to the parent of `10` blocks. We can see that the output of this command is the ID of the new subnet.
+
+#### Exporting wallet keys
+
+In order to run a validator in a subnet, we’ll need a set of keys to handle that validator. To export the validator key from a wallet that may live in another network into a file (like the wallet address we are using in the rootnet), we can use the following Lotus command:
+
+```shell
+eudico wallet export --lotus-json >
+```
+
+For example:
+
+```shell
+eudico wallet export --lotus-json t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq > ~/.ipc-agent/wallet.key
+```
+
+If your daemon is running on a docker container, you can get the container id (also provided in the output of the infra scripts), and run the following command inside the container, outputting the exported private key into a local file:
+
+```shell
+docker exec -it eudico wallet export --lotus-json > ~/.ipc-agent/wallet.key
+```
+
+For example:
+
+```shell
+docker exec -it 84711d67cf162e30747c4525d69728c4dea8c6b4b35cd89f6d0947fee14bf908 eudico wallet export --lotus-json t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq > ~/.ipc-agent/wallet.key
+```
+
+Let’s illustrate the flow by creating a new wallet in our recently deployed root and exporting the keys.
+
+```shell
+# Create the new wallet
+./bin/ipc-agent wallet new --key-type=secp256k1
+[2023-03-29T09:32:52Z INFO ipc_agent::cli::commands::manager::wallet] created new wallet with address WalletNewResponse { address: "t17rnww5qirr2fh5uiqy6fyi6ix7otwjzgu6pgpey" } in subnet "/root"
+
+# Export the created wallet into ipc-agent
+docker exec -it eudico wallet export --lotus-json >
+
+# Sample execution for the address created above
+docker exec -it 84711d67cf162e30747c4525d69728c4dea8c6b4b35cd89f6d0947fee14bf908 eudico wallet export --lotus-json t17rnww5qirr2fh5uiqy6fyi6ix7otwjzgu6pgpey > ~/.ipc-agent/wallet.key
+```
+
+#### Deploy subnet daemon
+
+Before joining a new subnet, our node for that subnet should be initialized, because as part of the joining process we would need to provide information about our validator network address, so other validators know how to dial them. For the deployment of subnet daemons we also provide a convenient infra script:
+
+```shell
+./bin/ipc-infra/run-subnet-docker.sh
+
+# Sample execution
+./bin/ipc-infra/run-subnet-docker.sh 1239 1349 /root/t01002 ~/.ipc-agent/wallet.key
+```
+
+{% hint style="danger" %}
+This script doesn’t support the use of relative paths for the wallet path.
+{% endhint %}
+
+The end of the log of the execution of this script provides a bit more of information than the previous one as it is implemented to be used for production deployments:
+
+```shell
+>>> Subnet /root/t01002 daemon running in container: 22312347b743f1e95e50a31c1f47736580c9a84819f41cb4ed3d80161a0d750f
+>>> Token to /root/t01002 daemon: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.TnoDqZJ1fqdkr_oCHFEXvdwU6kYR7Va_ALyEuoPnksA
+>>> Default wallet: t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq
+>>> Subnet subnet validator info:
+t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq@/ip4/172.17.0.3/udp/1348/quic/p2p/12D3KooWN5hbWkCxwvrX9xYxMwFbWm2Jpa1o4qhwifmS t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq@/ip4/127.0.0.1/udp/1348/quic/p2p/12D3KooWN5hbWkCxwvrX9xYxMwFbWm2Jpa1o4qhwifmS t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq@/ip4/172.17.0.3/tcp/1347/p2p/12D3KooWN5hbWkCxwvrX9xYxMwFbWm2Jpa1o4qhwifmSw3Fb t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq@/ip4/127.0.0.1/tcp/1347/p2p/12D3KooWN5hbWkCxwvrX9xYxMwFbWm2Jpa1o4qhwifmSw3FbaVcL
+>>> API listening in host port 1239
+>>> Validator listening in host port 1349
+```
+
+The validator address specified here should be the same as the one that will be used in the next step to join the subnet.
+
+This log provides information about the API and auth tokens for the daemon, default validator wallet used, the multiaddresses where the validator is listening, etc. To configure our IPC agent with this subnet daemon, we need to once again update our IPC agent with the relevant information. In this case, for the sample execution above we need to add the following section to the end of our config file:
+
+```toml
+[[subnets]]
+id = "/root/t01002"
+gateway_addr = "t064"
+network_name = "test"
+jsonrpc_api_http = "http://127.0.0.1:1239/rpc/v1"
+auth_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.TnoDqZJ1fqdkr_oCHFEXvdwU6kYR7Va_ALyEuoPnksA"
+accounts = ["t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq"]
+```
+
+As always, remember to run `./bin/ipc-agent config reload` for changes in the config of the agent to be picked up by the daemon.
+
+#### Joining a subnet
+
+With the daemon for the subnet deployed, we can join the subnet:
+
+```shell
+./bin/ipc-agent subnet join --subnet= --collateral= --validator-net-addr=
+```
+
+For example:
+
+```bash
+./bin/ipc-agent subnet join --subnet=/root/t01002 --collateral=2 --validator-net-addr="GET_ADDRESS_FROM_SCRIPT"
+```
+
+This command specifies the subnet to join, the amount of collateral to provide and the validator net address used by other validators to dial them. We can pick up this information from the execution of the script above or by running `eudico mir validator config validator-addr` from your deployment. Bear in mind that the multiaddress provided for the validator needs to be publicly accessible by other validators. Depending on the deployment used, you may need to tweak the IP addresses of the multiaddresses, as the ones provided by these scripts and commands may not be usable out-of-the-box.
+
+For instance, in the example above, we are using the DNS endpoint `/dns/host.docker.internal/` so other Docker containers for the subnet deployed in the host machine know how to contact the validator.
+
+As a sanity-check that we have joined the subnet successfully and that we provided enough collateral to register the subnet to IPC, we can list the child subnets of our parent with the following command:
+
+```shell
+./bin/ipc-agent subnet list --gateway-address= --subnet=
+
+```
+
+For example:
+
+```bash
+./bin/ipc-agent subnet list --gateway-address=t064 --subnet=/root
+
+# [2023-03-30T17:00:25Z INFO ipc_agent::cli::commands::manager::list_subnets] /root/t01003 - status: 0, collateral: 2 FIL, circ.supply: 0.0 FIL
+```
+
+In the current implementation of IPC the gateway actor is deployed as a system actor on the default address `t064`, so whenever one of the IPC commands requests the address of the gateway actor you can use that value.
+
+#### Mining in a subnet
+
+With our subnet daemon deployed, and having joined the network, as the minimum number of validators we set for our subnet is 0, we can start mining and creating new blocks in the subnet. Doing so is as simple as running the following script using as an argument the container of our subnet node:
+
+```shell
+./bin/ipc-infra/mine-subnet.sh
+```
+
+For example:
+
+```bash
+./bin/ipc-infra/mine-subnet.sh 84711d67cf162e30747c4525d69728c4dea8c6b4b35cd89f6d0947fee14bf908
+```
+
+The mining process is currently run in the foreground in interactive mode. Consider using `nohup ./bin/ipc-infra/mine-subnet.sh` or tmux to run the process in the background and redirect the logs to some file.
+
+#### Changing subnet validator network address
+
+It may be the case that while joining the subnet, you didn’t set the multiaddress for your validator correctly and you need to update it. You’ll realize that the network address of your validator is not configured correctly, because your agent throws an error when trying to connect to your subnet node, or starting the validator in your subnet throws a network-related error.
+
+Changing the validator is as simple as running the following command:
+
+```shell
+./bin/ipc-agent subnet set-validator-net-addr --subnet= --validator-net-addr=
+```
+
+For example:
+
+```bash
+./bin/ipc-agent subnet set-validator-net-addr --subnet=/root/t01002 --validator-net-addr="/dns/host.docker.internal/tcp/1349/p2p/12D3KooWDeN3bTvZEH11s9Gq5bDeZZLKgRZiMDcy2KmA6mUaT9KE"
+```
+
+#### Committing checkpoints from a subnet
+
+Subnets are periodically committing checkpoints to their parent every `check-period` (parameter defined when creating the subnet). When you configure the connection to your child subnet in the agent config, and `config reload`, your agent should automatically start the process responsible for creating the checkpoints and submitting them to the parent. This process will only commit new checkpoints if you are a validator in that subnet. If the agent has successfully spawned the checkpointing process, you should start seeing these logs every now and then:
+
+```plaintext
+[2023-03-29T09:52:48Z INFO ipc_agent::manager::checkpoint] Submitting checkpoint for account t1cp4q4lqsdhob23ysywffg2tvb
+[2023-03-29T09:52:55Z INFO ipc_agent::manager::checkpoint] successfully published checkpoint submission for epoch 50
+```
+
+It is common for the checkpointing process to fail while configuring a child subnet: either because the auth token is not correct, or because no wallet addresses have been configured in the subnet, etc. If this happens, running `./bin/ipc-agent config reload` will restart the checkpoint manager and pick up the latest config values. Whenever you see an error in the checkpointing process, check that your subnet’s configuration is correct and `config reload` to restart the process.
+
+Finally, if you want to inspect the information of a range of checkpoints committed in the parent for a subnet, you can use the `list-bottomup` command provided by the agent as follows:
+
+```shell
+# List checkpoints between two epochs for a subnet
+./bin/ipc-agent checkpoint list-bottomup --from-epoch= --to-epoch= --subnet=
+
+```
+
+For example:
+
+```bash
+./bin/ipc-agent checkpoint list-bottomup --from-epoch=0 --to-epoch=100 --subnet=/
+root/t01002
+
+# [2023-03-29T12:43:42Z INFO ipc_agent::cli::commands::manager::list_checkpoints] epoch 0 - prev_check={"/":"bafy2bzacedkoa623kvi5gfis2yks7xxjl73vg7xwbojz4tpq63dd5jpfz757i"}, cross_msgs=null, child_checks=null
+# [2023-03-29T12:43:42Z INFO ipc_agent::cli::commands::manager::list_checkpoints] epoch 10 - prev_check={"/":"bafy2bzacecsatvda6lodrorh7y7foxjt3a2dexxx5jiyvtl7gimrrvywb7l5m"}, cross_msgs=null, child_checks=null
+# [2023-03-29T12:43:42Z INFO ipc_agent::cli::commands::manager::list_checkpoints] epoch 30 - prev_check={"/":"bafy2bzaceauzdx22hna4e4cqf55jqmd64a4fx72sxprzj72qhrwuxhdl7zexu"}, cross_msgs=null, child_checks=null
+```
+
+#### Sending funds in a subnet
+
+The agent provides a command to conveniently exchange funds between addresses of the same subnet. This can be achieved through the following command:
+
+```shell
+./bin/ipc-agent subnet send-value --subnet= --to=
+```
+
+For example:
+
+```
+./bin/ipc-agent subnet send-value --subnet=/root/t01002 --to=t1xbevqterae2tanmh2kaqksnoacflrv6w2dflq4i 10
+```
+
+#### Leaving a subnet
+
+To leave a subnet, the following agent command can be used:
+
+```shell
+./bin/ipc-agent subnet leave --subnet=
+```
+
+For example:
+
+```bash
+./bin/ipc-agent subnet leave --subnet=/root/t01002
+```
+
+Leaving a subnet will release the collateral for the validator and remove all the validation rights from its account. This means that if you have a validator running in that subnet, its validation process will immediately terminate.
+
+#### Importing a wallet to a subnet node
+
+Depending on if the subnet is running inside a docker container or not, we can use the following commands to import a wallet to a subnet node:
+
+```shell
+# Importing directly into the node
+eudico wallet import --lotus-json
+```
+
+```bash
+# Importing directly into a docker container
+docker cp :
+```
+
+```bash
+# Copy the wallet key inside the container
+docker exec -it sh -c "./eudico wallet import --format=json-lotus "
+```
+
+```bash
+# Sample execution
+docker cp ~/.ipc-agent/t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy.key 91d2af805346:/input.key
+docker exec -it 91d2af805346 sh -c "eudico wallet import --format=json-lotus input.key"
+```
+
+#### Running a subnet with several validators
+
+In this section, we will deploy a subnet where the IPC agent is responsible for handling more than one validator in the subnet. Throughout this guide, we use the `ipc-infra` scripts to deploy the nodes in Docker containers, but similar steps could be used to deploy the nodes on bare-metal or any other runtime.
+
+For the rest of this tutorial, we’ll assume that you have your agent already configured and interacting with a rootnet. We are going to deploy a subnet with 5 validators. The first thing we’ll need to do is creating a new wallet for every validator we want to run. We can do this directly through the agent with the following command:
+
+```shell
+./bin/ipc-agent wallet new --key-type=secp256k1
+```
+
+We also need to provide our wallets with some funds so they can put up collateral to join the subnet. Depending on the rootnet you are connected to, you may need to get some funds from the faucet, or send some from your main wallet. Funds can also be sent from your main wallet through the agent with:
+
+```shell
+./bin/ipc-agent subnet send-value --subnet=/root --to=
+```
+
+With this, we can already create the subnet with `/root` as its parent. We are going to set the `--min-validators 5` so no new blocks can be created without this number of validators in the subnet.
+
+```shell
+# Creating a sample subnet with 5 as the minimum number of validators.
+./bin/ipc-agent subnet create -p /root -n test --min-validator-stake 1 --min-validators 5 --bottomup-check-period 10 --topdown-check-period 10
+```
+
+#### Deploying the infrastructure
+
+In order to deploy the 5 validators for the subnet, we will have to first export the keys from our root node so we can import them to our validators. Depending on how you are running your rootnet node you’ll have to make a call to the docker container, or your node’s API.
+
+With the five keys conveniently exported, we can deploy the subnet nodes using the `infra-scripts`. The following code snippet showcases the deployment of five sample nodes. Note that each node should be importing a different wallet key for their validator, and should be exposing different ports for their API and validators:
+
+```shell
+./bin/ipc-infra/run-subnet-docker.sh 1240 1359 /root/t01002 ~/.ipc-agent/wallet1.key
+./bin/ipc-infra/run-subnet-docker.sh 1250 1369 /root/t01002 ~/.ipc-agent/wallet2.key
+./bin/ipc-infra/run-subnet-docker.sh 1280 1379 /root/t01002 ~/.ipc-agent/wallet3.key
+./bin/ipc-infra/run-subnet-docker.sh 1270 1389 /root/t01002 ~/.ipc-agent/wallet4.key
+./bin/ipc-infra/run-subnet-docker.sh 1290 1399 /root/t01002 ~/.ipc-agent/wallet5.key
+```
+
+If the deployment is successful each of these nodes should return the following output at the end of their logs. Note down this information somewhere as we will need it to conveniently join our validators to the subnet.
+
+```plaintext
+>>> Subnet /root/t01002 daemon running in container: 91d2af80534665a8d9a20127e480c16136d352a79563e74ee3c5497d50b9eda8 (friendly name: ipc_root_t01002_1240)
+>>> Token to /root/t01002 daemon: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.JTiumQwFIutkTb0gUC5JWTATs-lUvDaopEDE0ewgzLk
+>>> Default wallet: t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy
+>>> Subnet subnet validator info:
+t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy@/ip4/172.17.0.4/udp/1348/quic/p2p/12D3KooWEJXcSPw6Yv4jDk52xvp2rdeG3J6jCPX9AgBJE2mRCVoR
+t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy@/ip4/127.0.0.1/udp/1348/quic/p2p/12D3KooWEJXcSPw6Yv4jDk52xvp2rdeG3J6jCPX9AgBJE2mRCVoR
+t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy@/ip4/172.17.0.4/tcp/1347/p2p/12D3KooWEJXcSPw6Yv4jDk52xvp2rdeG3J6jCPX9AgBJE2mRCVoR
+t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy@/ip4/127.0.0.1/tcp/1347/p2p/12D3KooWEJXcSPw6Yv4jDk52xvp2rdeG3J6jCPX9AgBJE2mRCVoR
+>>> API listening in host port 1240
+>>> Validator listening in host port 1359
+```
+
+#### Configuring the agent
+
+To configure the agent for its use with all the validators, we need to connect to the RPC API of one of the validators, and import all of the wallets of the validators in that node, so the agent is able through the same API to act on behalf of any validator.
+
+Here’s an example of the configuration connecting to the RPC of the first validator, and configuring all the wallets for the validators in the subnet.
+
+```toml
+[[subnets]]
+id = "/root/t01002"
+gateway_addr = "t064"
+network_name = "test"
+jsonrpc_api_http = "http://127.0.0.1:1240/rpc/v1"
+auth_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.JTiumQwFIutkTb0gUC5JWTATs-lUvDaopEDE0ewgzLk"
+accounts = ["t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy", "t1cp4q4lqsdhob23ysywffg2tvbmar5cshia4rweq", "t1nv5jrdxk4ljzndaecfjgmu35k6iz54pkufktvua", "t1pr3qzqieikp33pfdaygwkwt5v35a5ljsxsu64xq", "t1pmxk3nhg2u2ge7ioilrk7c5rmdmmbgpazipvzyy"]
+```
+
+Remember to run `./bin/ipc-agent config reload` for your agent to pick up the latest changes for the config.
+
+#### Joining the subnet
+
+All the infrastructure for the subnet is now deployed, and we can join our validators to the subnet. For this, we need to send a `join` command from each of our validators from their validator wallet addresses providing the validator’s multiaddress. We can get the validator multiaddress from the output of the script we ran to deploy the infrastructure (or by running `eudico mir validator config validator-addr`). These scripts expose through Docker the TCP port of the libp2p host of the validator (if there is interest to use the UDP-based multiaddress drop an issue and we can update them accordingly). Hence, we need to choose from the available multiaddresses one that exposes a TCP connection. Finally, our validators are configured so their docker containers interact with each other through ports in the host machine, so instead of exposing an IP address for the multiaddress, the following DNS-based multiaddress needs to be used for all validators: `/dns/host.docker.internal/`.
+
+This is the command that needs to be executed for every validator to join the subnet:
+
+```shell
+./bin/ipc-agent subnet join --from= --subnet=/root/t01002 --collateral= --validator-net-addr="/dns/host.docker.internal/tcp//p2p/"
+```
+
+For example:
+
+```bash
+# Sample execution for the validator whose logs were shared above for 2FIL collateral
+./bin/ipc-agent subnet join --from=t1ivy6mo2ofxw4fdmft22nel66w63fb7cuyslm4cy --subnet=/root/t01002 --collateral=2 --validator-net-addr="/dns/host.docker.internal/tcp/1359/p2p/12D3KooWEJXcSPw6Yv4jDk52xvp2rdeG3J6jCPX9AgBJE2mRCVoR"
+```
+
+Remember doing the above step for the five validators.
+
+#### Mining in subnet
+
+We have everything in place now to start mining from all the validators. Mining is as simple as running the following script passing the container id for the validators:
+
+```shell
+./bin/ipc-infra/mine-subnet.sh
+```
+
+The mining process is currently run in the foreground in interactive mode. Consider using `nohup ./bin/ipc-infra/mine-subnet.sh` or tmux to run the process in the background and redirect the logs to some file as handling the mining process of the five validators in the foreground may be quite cumbersome.
+
+### Troubleshooting
+
+#### I need to upgrade my IPC agent
+
+Sometimes, things break, and we’ll need to push a quick patch to fix some bug. If this happens, and you need to upgrade your agent version, kill your agent daemon if you have any running, pull the latest changes from this repository, build the binary, and start your daemon again. This should pick up the latest version for the agent. In the future, we will provide a better way to upgrade your agent.
+
+```shell
+# Pull latest changes
+git pull
+
+# Build the agent
+make build
+
+# Restart the daemon
+./bin/ipc-agent daemon
+```
+
+#### The make install-infra command is not building the Eudico image
+
+`make install-infra` may fail and not build the `eudico` image if your system is not configured correctly. If this happens, you can always try to build the image yourself to have a finer-grain report of the issues to help you debug them. For this you can [follow these instructions](https://github.com/consensus-shipyard/lotus/blob/spacenet/scripts/ipc/README.md).
+
+High-level you just need to clone the [Eudico repository](https://github.com/consensus-shipyard/lotus), and run `docker build -t eudico .` in the root of the repository.
+
+#### My subnet node doesn’t start
+
+Either because the dockerized subnet node after running `./bin/ipc-infra/run-subnet-docker.sh` gets stuck waiting for the API to be started with the following message:
+
+```plaintext
+Not online yet... (could not get API info for FullNode: could not get api endpoint: API not running (no endpoint))
+```
+
+Or because when the script finishes no validator address has been reported as expected by the logs, the best way to debug this situation is to attach to the docker container and check the logs with the following command:
+
+```shell
+docker exec -it bash
+```
+
+Once inside the container, run:
+
+```bash
+tmux a
+```
+
+Generally, the issue is that:
+
+* You haven’t passed the validator key correctly and it couldn’t be imported.
+* There was some network instability, and lotus parameters couldn’t be downloaded successfully.
+
+#### My agent is not submitting checkpoints after an error
+
+Try running `./bin/ipc-agent config reload`, this should pick up the latest config and restart all checkpointing processes. If the error has been fixed or it was a network instability between the agent and your subnet daemon, checkpoints should start being committed again seamlessly.
diff --git a/basics/interplanetary-consensus/mir-and-trantor.md b/basics/interplanetary-consensus/mir-and-trantor.md
new file mode 100644
index 000000000..fd01215ac
--- /dev/null
+++ b/basics/interplanetary-consensus/mir-and-trantor.md
@@ -0,0 +1,25 @@
+---
+description: >-
+ Alongside the scaling framework, the IPC project is developing
+ high-performance consensus protocols that are suitable for subnet use.
+---
+
+# Mir and Trantor
+
+Beyond their function as a scaling tool, IPC subnets will be deployed to meet the requirements of applications that cannot be run on the Filecoin rootnet. The combination of Mir and Trantor provides a high-performance consensus mechanism with faster finality and higher throughput. Moreover, Trantor can be further configured for each subnet.
+
+### Mir framework
+
+[Mir](https://github.com/filecoin-project/mir) is a framework for implementing, debugging, and analyzing distributed protocols. It is organized as a library that provides abstractions representing different components of a distributed system and an engine to orchestrate their interaction.
+
+The goal of Mir is to enable the implementation of distributed protocols in a way that is agnostic to network transport, storage, and cryptographic primitives. These components of a distributed protocol implementation are encapsulated in abstractions with well-defined interfaces. While Mir provides some out-of-the-box implementations, users are free to define their own.
+
+Mir is used as a scalable and efficient consensus layer in Filecoin subnets. Learn more at [GitHub](https://github.com/filecoin-project/mir).
+
+### Trantor consensus protocol
+
+[Trantor](https://github.com/filecoin-project/mir/tree/main/pkg/trantor) is a modern, multi-leader, Byzantine fault-tolerant (BFT) protocol. It was inspired by recently proposed, high-throughput, BFT consensus protocols, namely ISS and Narwhal. Trantor iterates through instances of PBFT with immediate finality, each block containing an ordered list of decided transactions and a certificate for verification, with every nth block containing a checkpoint of the state.
+
+Learn more on [GitHub](https://github.com/filecoin-project/mir/tree/main/pkg/trantor).
+
+To stay updated on Mir and Trantor development, join the `#mir-dev` channel in the [Filecoin Slack workspace](https://filecoin.io/slack/).
diff --git a/basics/project-and-community/README.md b/basics/project-and-community/README.md
new file mode 100644
index 000000000..4c9f10c69
--- /dev/null
+++ b/basics/project-and-community/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section contains information about the Filecoin project as a whole, and
+ how you can interact with the community.
+---
+
+# Project and community
+
diff --git a/basics/project-and-community/chat-and-discussion-forums.md b/basics/project-and-community/chat-and-discussion-forums.md
new file mode 100644
index 000000000..e7cf40b6f
--- /dev/null
+++ b/basics/project-and-community/chat-and-discussion-forums.md
@@ -0,0 +1,19 @@
+---
+description: >-
+ Connect with the Filecoin community in discussion forums or on IRC. The
+ Filecoin community is active and here to answer your questions in your channel
+ of choice.
+---
+
+# Chat and discussion forums
+
+### Chat
+
+For shorter-lived discussions, our community chat is open to all on both Slack and Matrix, with bridged channels allowing you to participate in the same conversations from either platform:
+
+* [Slack](https://filecoin.io/slack/)
+* [Matrix](https://matrix.to/#/#ipfs-space:ipfs.io)
+
+### Discussion Forums
+
+For long-lived discussions and for support, please use the [discussion tab on GitHub](https://github.com/filecoin-project/community#forums) instead of Slack or Matrix. It’s easy for complex discussions to get lost in a sea of new messages on those chat platforms, and posting longer discussions and support requests on the forums helps future visitors, too.
diff --git a/basics/project-and-community/filecoin-compared-to.md b/basics/project-and-community/filecoin-compared-to.md
new file mode 100644
index 000000000..977b3b16f
--- /dev/null
+++ b/basics/project-and-community/filecoin-compared-to.md
@@ -0,0 +1,41 @@
+---
+description: >-
+ While Filecoin shares some similarities to other file storage solutions, the
+ protocol has significant differences that one should consider.
+---
+
+# Filecoin compared to
+
+Filecoin combines many elements of other file storage and distribution systems. What makes Filecoin unique is that it runs on an open, peer-to-peer network while still providing economic incentives and proofs to ensure files are being stored correctly. This page compares Filecoin against other technologies that share some of the same properties.
+
+* [Filecoin vs. Amazon S3, Google Cloud Storage](filecoin-compared-to.md#filecoin-vs.-amazon-s3-google-cloud-storage)
+* [Filecoin vs. Bitcoin](filecoin-compared-to.md#filecoin-tokens-fil-vs.-bitcoin-tokens-btc)
+
+#### Filecoin vs. Amazon S3, Google Cloud Storage
+
+| | Filecoin | Amazon S3, Google Cloud Storage |
+| --------------------------- | ------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
+| Main use case | Storing files at hypercompetitive prices | Storing files using a familiar, widely-supported service |
+| Pricing | Determined by a hypercompetitive open market | Set by corporate pricing departments |
+| Centralization | Many small, independent storage providers | A handful of large companies |
+| Reliability stats | Independently checked by the network and publicly verifiable | Companies self-report their own stats |
+| API | Applications can access all storage providers using the Filecoin protocol | Applications must implement a different API for each storage provider |
+| Retrieval | Competitive market for retrieving files | Typically more expensive than storing files to lock users in |
+| Fault handling | If a file is lost, the user is refunded automatically by the network | Companies can offer users credit if files are lost or unavailable |
+| Support | If something goes wrong, the Filecoin protocol determines what happens without human intervention | If something goes wrong, users contact the support help desk to seek resolution |
+| Physical location | Miners located anywhere in the world | Limited to where provider’s data centres are located |
+| Becoming a storage provider | Low barrier to entry for storage providers (computer, hard drive, internet connection) | High barrier to entry for storage providers (legal agreements, marketing, support staff) |
+
+#### Filecoin tokens (FIL) vs. Bitcoin tokens (BTC)
+
+| | FIL | BTC |
+| ------------------- | -------------------------------------------------------------------- | --------------------------------------------------------------------- |
+| Main use case | File storage | Payment network |
+| Data storage | Good at storing large amounts of data inexpensively | Small amounts of data can be stored on blockchain at significant cost |
+| Proof | Blockchain secured using proof of replication and proof of spacetime | Blockchain secured using proof of work |
+| Consensus power | Miners with the most storage have the most power | Miners with the most computational speed have the most power |
+| Mining hardware | Hard drives, GPUs, and CPUs | ASICs |
+| Mining usefulness | Mining results in peoples’ files being stored | Mining results in heat |
+| Types of provider | Storage provider, retrieval provider, repair provider | All providers perform proof of work |
+| Uptime requirements | Storage providers rewarded for uptime, penalized for downtime | Miners can go offline without being penalized |
+| Network status | Mainnet running since 2020 | Mainnet running since 2009 |
diff --git a/basics/project-and-community/filecoin-faqs.md b/basics/project-and-community/filecoin-faqs.md
new file mode 100644
index 000000000..c8881a906
--- /dev/null
+++ b/basics/project-and-community/filecoin-faqs.md
@@ -0,0 +1,156 @@
+---
+description: >-
+ Answers to your frequently asked questions on everything from Filecoin’s
+ crypto-economics and storage expenses to hardware and networking.
+---
+
+# Filecoin FAQs
+
+#### What are some of the primary use cases for Filecoin?
+
+Filecoin is a protocol that provides core primitives, enabling a truly trustless decentralized storage network. These primitives and features include publicly verifiable cryptographic storage proofs, [cryptoeconomic mechanisms](https://filecoin.io/blog/filecoin-cryptoeconomic-constructions/), and a public blockchain. Filecoin provides these primitives to solve the really hard problem of creating a trustless decentralized storage network.
+
+On top of the core Filecoin protocol, there are a number of layer 2 solutions that enable a broad array of use cases and applications, many of which also use [IPFS](https://ipfs.tech). These solutions include [Powergate](https://docs.textile.io/powergate/), [Textile Hub](https://blog.textile.io/announcing-the-textile-protocol-hub/), and more. Using these solutions, any use case that can be built on top of IPFS can also be built on Filecoin!
+
+Some of the primary areas for development on Filecoin are:
+
+* Additional developer tools and layer-2 solutions and libraries that strengthen Filecoin as a developer platform and ecosystem.
+* IPFS apps that rely on decentralized storage solutions and want a decentralized data persistence solution as well.
+* Financial tools and services on Filecoin, like wallets, signing libraries, and more.
+* Applications that use Filecoin’s publicly verifiable cryptographic proofs in order to provide trustless and timestamped guarantees of storage to their users.
+
+#### How can a website or app be free if it costs to retrieve data from the Filecoin network?
+
+Most websites and apps make money by displaying ads. This type of income-model could be replaced with a Filecoin incentivized retrieval setup, where users pay small amounts of FIL for whatever files they’re hoping to download. Several large datasets are hosted through Amazon’s _pay per download_ S3 buckets, which Filecoin retrieval could also easily augment or replace.
+
+#### How will Filecoin attract developers to use Filecoin for storage?
+
+It’s going to require a major shift in how we think about the internet. At the same time, it is a very exciting shift, and things are slowly heading that way. Browser vendors like Brave, Opera, and Firefox are investing into decentralized infrastructure.
+
+We think that the internet must return to its _decentralized roots_ to be resilient, robust, and efficient enough for the challenges of the next several decades. Early developers in the Filecoin ecosystem are those who believe in that same vision and potential for the internet, and we’re excited to work with them to build this space.
+
+#### What are the detailed parameters of Filecoin’s cryptoeconomics?
+
+We are still finalizing our cryptoeconomic parameters, and they will continue to evolve.
+
+Here is a blog about Filecoin economics from December 2020: [Filecoin network economics](https://filecoin.io/blog/posts/filecoin-network-economics/).
+
+#### How expensive will Filecoin storage be at launch?
+
+As Filecoin is a free market, the price will be determined by a number of variables related to the supply and demand for storage. It’s difficult to predict before launch. However, a few design elements of the network help support inexpensive storage.
+
+Along with revenue from active storage deals, Storage Miners receive block rewards, where the expected value of winning a given block reward is proportional to the amount of storage they have on the network. These block rewards are weighted heavily towards the early days of the network (with the frequency of block rewards exponentially decaying over time). As a result, Storage Miners are relatively incentivized to charge less for storage to win more deals, which would increase their expected block reward.
+
+Further, Filecoin introduces a concept called _Verified Clients_, where clients can be verified to actually be storing useful data. Storage Miners who store data from _Verified Clients_ also increase their expected block reward. Anyone running a Filecoin-backed IPFS Pinning Services should qualify as a _Verified Client_. We do not have the process of verification finalized, but we expect it to be similar to submitting a GitHub profile.
+
+#### Will it be cheaper to store data on Filecoin than other centralized cloud services?
+
+Filecoin creates a hyper-competitive market for data storage. There will be many storage providers offering many prices, rather than one fixed price on the network. We expect Filecoin’s permissionless model and low barriers to entry to result in some very efficient operations and low-priced storage, but it’s impossible to say what exact prices will be until the network is live.
+
+#### What happens to the existing content on IPFS once Filecoin launches? What if nodes continue to host content for free and undermine the Filecoin incentive layer?
+
+IPFS will continue to exist as it is, enhanced with Filecoin nodes. There are many use cases that require no financial incentive. Think of it like IPFS is HTTP, and Filecoin is a storage cloud-like S3 – only a fraction of IPFS content will be there.
+
+People with unused storage who want to earn monetary rewards should pledge that storage to Filecoin, and clients who want guaranteed storage should store that data with Filecoin storage providers.
+
+#### Lotus or Venus, which is better for storage providers?
+
+Lotus is the primary reference implementation for the Filecoin protocol. At this stage, we would recommend most storage providers use lotus to participate in the Filecoin network.
+
+#### What is your recommendation on the right hardware to use?
+
+While the Filecoin team does not recommend a specific hardware configuration, we document various setups [here](../../storage-providers/infrastructure/). Additionally, [this guide to storage mining](../../storage-providers/basics/quickstart-guide.md) details hardware considerations and setups for storage providers. However, it is likely that there are more efficient setups, and we strongly encourage storage providers to test and experiment to find the best combinations.
+
+#### We are worried about the ability of our network to handle the additional overhead of running a Filecoin node and still provide fast services for our customers. What are the computational demands of a Lotus node? Are there any metrics for node performance given various requirements?
+
+For information on Lotus requirements, see [Prerequisites > Minimal requirements](https://lotus.filecoin.io/lotus/install/prerequisites/#minimal-requirements).
+
+For information on Lotus full nodes and lite nodes, see [Types of nodes](https://lotus.filecoin.io/lotus/get-started/use-cases/).
+
+#### We bought a lot of hard drives of data through the Discover project. When will they be shipped to China?
+
+There are a number of details that are still being finalized between the verified deals construction and the associated cryptoeconomic parameters.
+
+Our aim is to allow these details to finalize before shipping, but given timelines, we’re considering enabling teams to take receipt of these drives before the parameters are set. We will publish updates on the status of the Discover project on the Filecoin blog.
+
+#### Do Filecoin storage providers need a fixed IP?
+
+For mainnet, you will need a public IP address, but it doesn’t need to be fixed (just accessible).
+
+#### What if we lost a sector accidentally, is there any way to fix that?
+
+If you lost the data itself, then no, there’s no way to recover that, and you will be slashed for it. If the data itself is recoverable, though (say you just missed a _WindowPoSt_), then the Recovery process will let you regain the sector.
+
+#### Has Filecoin confirmed the use of the SDR algorithm? Is there any evidence of malicious construction?
+
+SDR ([Stacked DRG PoRep](https://spec.filecoin.io/algorithms/porep-old/stacked\_drg/#section-algorithms.porep-old.stacked\_drg)) is confirmed and used, and we have no evidence of malicious construction. The algorithm is also going through both internal and external security audits.
+
+If you have any information about any potential security problem or malicious construction, reach out to our team at [security@filecoin.org](mailto:security@filecoin.org).
+
+#### How likely is it that the Filecoin protocol will switch to the NSE Proof-of-Replication construction later?
+
+Native storage extension (NSE) is one of the best candidates for a proof upgrade, and teams are working on implementation. But there are other candidates too, which are promising as well. It may be that another algorithm ends up better than NSE – we don’t know yet. Proof upgrades will arrive after the mainnet launch and will coexist.
+
+AMD may be optimal hardware for SDR. You can [see this description](https://github.com/filecoin-project/lotus/blob/master/documentation/en/sealing-procs.md) for more information on why.
+
+#### How are you working on bootstrapping the demand side of the marketplace? The Discover program is nice, but who is the target market for users, and how do you get them?
+
+In addition to [Filecoin Discover](https://filecoin.io/blog/posts/introducing-filecoin-discover/), a number of groups are actively building tools and services to support the adoption of the Filecoin network with developers and clients. For example, check out the recordings from our [Virtual Community Meetup](https://filecoin.io/blog/filecoin-virtual-community-meetup-recap/) to see updates about Textile Powergate and Starling Storage. You can also read more about some of the teams building on Filecoin through HackFS in our [HackFS Week 1 Recap](https://filecoin.io/blog/hackfs-teams-vol-1/).
+
+#### Does Filecoin have an implementation of client and storage provider order matching through order books?
+
+There will be off-chain [order books](https://www.investopedia.com/terms/o/order-book.asp) and storage provider marketplaces – some are in development now from some teams. They will work mostly off-chain because transactions per second on-chain are not enough for the volume of usage we expect on Filecoin. These order books build on the basic deal-flow on-chain. These order books will arrive in their own development trajectory – most likely around or soon after the mainnet launch.
+
+#### Why does Filecoin mining work best on AMD?
+
+Currently, Filecoin’s Proof of Replication (PoRep) prefers to be run on AMD processors. See this description of Filecoin sealing for more information. More accurately, it runs much slower on Intel CPUs. It runs competitively fast on some ARM processors, like the ones in newer Samsung phones, but they lack the RAM to seal the larger sector sizes. The main reason that we see this benefit on AMD processors is due to their implementation of the SHA hardware instructions.
+
+#### What do storage providers have to do to change a committed capacity (CC) sector into a “real-data” sector?
+
+Storage providers will publish storage deals that they will upgrade the CC sector with, announce to the chain that they are doing an upgrade, and prove to the chain that a new sector has been sealed correctly. We expect to evolve and make this cheaper and more attractive over time after the mainnet launch.
+
+#### What does “terminating a sector” mean?
+
+When a committed capacity sector is added to the chain, it can upgrade to a sector with deals, extend its lifetime, or terminate through either faults or voluntary actions. While we don’t expect this to happen very often on mainnet, a storage provider may deem it rational to terminate their promise to the network and their clients, and accept a penalty for doing so.
+
+#### Does the committed capacity sector still need to be sealed before it upgrades to one with real data?
+
+For the first iteration of the protocol, yes. We have plans to make it cheaper and more economically attractive after mainnet with no resealing required and other perks.
+
+#### What’s the minimum time period for the storage contract between the provider and the buyer?
+
+The minimum duration for a deal is set in the storage provider’s ask. There’s also a practical limitation because sectors have a minimum duration (currently 180 days).
+
+#### After I made a deal with a storage provider and sent my data to them, how exactly is the data supposed to be recoverable and healable if that storage provider goes down?
+
+Automatic repair of faulted data is a feature we’ve pushed off until after the mainnet launch. For now, the way to ensure resiliency is to store your data with multiple storage providers, to gain some level of redundancy. If you want to learn more about how we are thinking about repair in the future, [here are some notes](https://github.com/filecoin-project/specs/pull/245/files).
+
+#### How do I know that my storage provider will not charge prohibitively high costs for data retrieval?
+
+To avoid extortion, always ensure you store your data with a fairly decentralized set of storage providers (and note: it’s pretty difficult for a storage provider to be sure they are the only person storing a particular piece of data, especially if you encrypt the data).
+
+Storage providers currently provide a ‘dumb box’ interface and will serve anyone any data they have. Maybe in the future, storage providers will offer access control lists (ACLs) and logins and such, but that requires that you trust the storage provider. The recommended (and safest) approach here is to encrypt data you don’t want others to see yourself before storing it.
+
+#### How do you update data stored on Filecoin?
+
+We have some really good ideas around ‘warm’ storage (that is mutable and provable) that we will probably implement in the near future. But for now, your app will have to treat Filecoin as an append-only log. If you want to change your data, you just write new data.
+
+‘Warm’ storage can be done with a small amount of trust, where you make a deal with a storage provider with a start date quite far in the future. The storage provider can choose to store your data in a sector now (but they won’t get paid for proving it until the actual start date), or they can hold it for you (and even send you proofs of it on request), and you can then send them new data to overwrite it, along with a new storage deal that overwrites the previous one.
+
+There’s a pretty large design space here, and we can do a bunch of different things depending on the levels of trust involved, the price sensitivity, and the frequency of updates clients desire.
+
+#### Who will be selected to be verifiers to verify clients on the network?
+
+Notaries, selected through an application process, serve as fiduciaries for the Filecoin Network and are responsible for allocating DataCap to clients with valuable storage use cases.
+
+See [Filecoin Plus](../how-storage-works/filecoin-plus.md).
+
+#### Will the existence of Filecoin mining pools lead to centralized storage and away from the vision of distributed storage?
+
+No – Filecoin creates a decentralized storage network in part by massively decreasing the barrier to entry to becoming a storage provider. Even if there were some large pools, anyone can join the network and provide storage with just a modest hardware purchase, and we expect clients to store their files with many diverse storage providers.
+
+Also, note that world location matters for mining: many clients will prefer storage providers in specific regions of the world, so this enables lots of storage providers to succeed across the world, where there is storage demand.
+
+#### Even though Filecoin will be backed up to our normal IPFS pinning layer, we still need to know how quickly we can access data from the Filecoin network. How fast will retrieval be from the Filecoin network?
+
+If you are retrieving your data from IPFS or a remote pinning layer, retrieval should take on the order of milliseconds to seconds in the worst case. Our latest tests for retrieval from the Filecoin network directly show that a sealed sector holding data takes \~1 hour to unseal. 1-5 hours is our best real-world estimate to go from sector unsealing to delivery of the data. If you need faster data retrieval for your application, we recommend building on Powergate or an FPS.
diff --git a/basics/project-and-community/related-projects.md b/basics/project-and-community/related-projects.md
new file mode 100644
index 000000000..3bebca5a7
--- /dev/null
+++ b/basics/project-and-community/related-projects.md
@@ -0,0 +1,28 @@
+---
+description: >-
+ Filecoin is a highly modular project that is itself made out of many different
+ protocols and tools. Many of these exist as their own projects, supported by
+ Protocol Labs. Learn more about them below.
+---
+
+# Related projects
+
+### Libp2p
+
+A modular network stack, libp2p enables you to run your network applications free from runtime and address services, independently of their location. Learn more at [libp2p.io/](http://libp2p.io/).
+
+### IPLD
+
+IPLD is the data model of the content-addressable web. It allows us to treat all hash-linked data structures as subsets of a unified information space, unifying all data models that link data with hashes as instances of IPLD. Learn more at [ipld.io/](https://ipld.io/).
+
+### IPFS
+
+IPFS is a distributed system for storing and accessing files, websites, applications, and data. However, it does not have support for incentivization or guarantees of this distributed storage; Filecoin provides the incentive layer. Learn more at [ipfs.tech/](https://ipfs.tech/).
+
+### Multiformats
+
+The Multiformats Project is a collection of protocols which aim to future-proof systems through self-describing format values that allow for interoperability and protocol agility. Learn more at [multiformats.io/](https://multiformats.io/).
+
+### ProtoSchool
+
+Interactive tutorials on decentralized web protocols, designed to introduce you to decentralized web concepts, protocols, and tools. Complete code challenges right in your web browser and track your progress as you go. Explore ProtoSchool’s tutorials on Filecoin at [proto.school/](https://proto.school/#/tutorials?course=filecoin).
diff --git a/basics/project-and-community/social-media.md b/basics/project-and-community/social-media.md
new file mode 100644
index 000000000..0348a8f56
--- /dev/null
+++ b/basics/project-and-community/social-media.md
@@ -0,0 +1,32 @@
+---
+description: >-
+ Filecoin is everywhere on the internet — and that includes social media. Find
+ your favorite flavor here.
+---
+
+# Social media
+
+### YouTube
+
+The [Filecoin YouTube channel](https://www.youtube.com/channel/UCPyYmtJYQwxM-EUyRUTp5DA) is home to a wealth of information about the Filecoin project — everything from developer demos to recordings of mining community calls — so you can explore playlists and subscribe to ones that interest and inform you.
+
+### Blog
+
+Explore the latest news, events and other happenings on the official [Filecoin Blog](https://filecoin.io/blog/).
+
+### Newsletter
+
+Subscribe to the [Filecoin newsletter](https://filecoin.io/build/#events) for official project updates sent straight to your inbox.
+
+### Twitter
+
+Get your Filecoin news in tweet-sized bites. Follow these accounts for the latest:
+
+* [`@Filecoin`](https://twitter.com/filecoin) for news and other updates from the Filecoin project
+* [`@ProtoSchool`](https://twitter.com/protoschool) for updates on ProtoSchool workshops and tutorials
+
+### WeChat
+
+Follow FilecoinOfficial on [WeChat](https://www.wechat.com/mobile) for project updates and announcements in Chinese.
+
+
diff --git a/basics/project-and-community/the-filecoin-project.md b/basics/project-and-community/the-filecoin-project.md
new file mode 100644
index 000000000..76ae691d1
--- /dev/null
+++ b/basics/project-and-community/the-filecoin-project.md
@@ -0,0 +1,19 @@
+---
+description: >-
+ Curious about how it all got started, or where we’re headed? Learn about the
+ history, current state, and future trajectory of the Filecoin project here.
+---
+
+# The Filecoin project
+
+### Roadmap
+
+The [Filecoin Community Roadmap](https://github.com/filecoin-project/community/discussions/456) is updated quarterly. It provides insight into the strategic development of the network and offers pathways for community members to learn more about ongoing work and connect directly with project teams.
+
+### Research
+
+Learn about the ongoing cryptography research and design efforts that underpin the Filecoin protocol on the [Filecoin Research website](https://github.com/filecoin-project/research). The [CryptoLab at Protocol Labs](https://research.protocol.ai/groups/cryptolab/) also actively researches improvements.
+
+### Code of conduct
+
+The Filecoin community believes that our mission is best served in an environment that is friendly, safe, and accepting, and free from intimidation or harassment. To that end, we ask that everyone involved in Filecoin read and respect our [code of conduct](https://github.com/filecoin-project/community/blob/master/CODE\_OF\_CONDUCT.md).
diff --git a/basics/project-and-community/ways-to-contribute.md b/basics/project-and-community/ways-to-contribute.md
new file mode 100644
index 000000000..31c178e8a
--- /dev/null
+++ b/basics/project-and-community/ways-to-contribute.md
@@ -0,0 +1,394 @@
+---
+description: >-
+ So you want to contribute to Filecoin and the ecosystem? Here is a quick
+ listing of things to which you can contribute and an overview on how you can
+ get started.
+---
+
+# Ways to contribute
+
+### Ways to contribute
+
+#### Code
+
+Filecoin and its sister-projects are big, with lots of code written in multiple languages. We always need help writing and maintaining code, but it can be daunting to just jump in. We use the label _Help Wanted_ on features or bug fixes that people can help out with. They are an excellent place for you to start contributing code.
+
+The biggest and most active repositories we have today are:
+
+* [`filecoin-project/venus`](https://github.com/filecoin-project/venus)
+* [`filecoin-project/lotus`](https://github.com/filecoin-project/lotus)
+* [`filecoin-project/rust-fil-proofs`](https://github.com/filecoin-project/rust-fil-proofs)
+
+If you want to start contributing to the core of Filecoin, those repositories are a great place to start. But the _Help Wanted_ label exists in several related projects:
+
+* [IPFS](https://github.com/ipfs)
+* [libp2p](https://github.com/libp2p)
+* [IPLD](https://github.com/ipld)
+* [Multiformats](https://github.com/multiformats)
+
+#### Documentation
+
+Filecoin is a huge project and undertaking, and with lots of code comes the need for lots of good documentation! However, we need a lot more help to write the awesome docs the project needs. If writing technical documentation is your area, any and all help is welcome!
+
+Before contributing to the Filecoin docs, please read these quick guides; they’ll save you time and help keep the docs accurate and consistent!
+
+1. [Style and formatting guide](ways-to-contribute.md#style)
+2. [Writing guide](ways-to-contribute.md#writing-guide)
+
+If you have never contributed to an open-source project before, or just need a refresher, take a look at the [contribution tutorial](ways-to-contribute.md#contribution-tutorial).
+
+#### Community
+
+If interacting with people is your favorite thing to do in this world, join the [Filecoin chat and discussion forums](chat-and-discussion-forums.md) to say hello, meet others who share your goals, and connect with other members of the community. You should also consider joining [Filecoin Slack](https://filecoin.io/slack).
+
+#### Build Applications
+
+Filecoin is designed for you to integrate into your own applications and services.
+
+Get started by looking at the list of projects currently built on Filecoin. Build anything you think is missing! If you’re unsure about something, you can join the chat and discussion forums to get help or feedback on your specific problem/idea. You can also join a Filecoin Hackathon, apply for a Filecoin Developer Grant or apply to the Filecoin accelerator program to support the development of your project.
+
+* [Filecoin Hackathons](https://hackathons.filecoin.io/)
+* [Filecoin Developer Grants](https://filecoin.io/grants/)
+* [Filecoin Accelerator Program](https://ecosystem-wg.notion.site/Protocol-Labs-Accelerator-Program-d45d8792a7d544eca9beb7d3e3d3b05d)
+
+#### Protocol Design
+
+Filecoin is ultimately about building better protocols, and the community always welcomes ideas and feedback on how to improve those protocols.
+
+* [`filecoin-project/specs`](https://github.com/filecoin-project/specs)
+
+#### Research
+
+Finally, we see Protocol Labs as a research lab, where YOUR ideas can become technologies that have a real impact on the world. If you’re interested in contributing to our research, please reach out to [research@protocol.ai](mailto:research@protocol.ai) for more information. Include what your interests are so we can make sure you get to work on something fun and valuable.
+
+### Writing guide
+
+This guide explains things to keep in mind when writing for Filecoin’s documentation. While the [grammar, formatting, and style guide](ways-to-contribute.md#style) lets you know the rules you should follow, this guide will help you to properly structure your writing and choose the correct tone for your audience.
+
+#### Walkthroughs
+
+The purpose of a walkthrough is to tell the user _how_ to do something. They do not need to convince the reader of something or explain a concept. Walkthroughs are a list of steps the reader must follow to achieve a process or function.
+
+The vast majority of documentation within the Filecoin documentation project falls under the _Walkthrough_ category. Walkthroughs are generally quite short, have a neutral tone, and teach the reader how to achieve a particular process or function. They present the reader with concrete steps on where to go, what to type, and things they should click on. There is little to no _conceptual_ information within walkthroughs.
+
+**Goals**
+
+Use the following goals when writing walkthroughs:
+
+| Goal      | Keyword   | Explanation                                                        |
+| --------- | --------- | ------------------------------------------------------------------ |
+| Audience  | General   | Easy for anyone to read with minimal effort.                       |
+| Formality | Neutral   | Slang is restricted, but standard casual expressions are allowed.  |
+| Domain    | Technical | Acronyms and tech-specific language is used and expected.          |
+| Tone      | Neutral   | Writing contains little to no emotion.                             |
+| Intent    | Instruct  | Tell the reader how to do something.                               |
+
+**Function or process**
+
+The end goal of a walkthrough is for the reader to achieve a very particular function. _Installing the Filecoin Desktop application_ is an example. Following this walkthrough isn’t going to teach the reader much about working with the decentralized web or what Filecoin is. Still, by the end, they’ll have the Filecoin Desktop application installed on their computer.
+
+**Short length**
+
+Since walkthroughs cover one particular function or process, they tend to be quite short. The estimated reading time of a walkthrough is somewhere between 2 and 10 minutes. Most of the time, the most critical content in a walkthrough is presented in a numbered list. Images and GIFs can help the reader understand what they should be doing.
+
+If a walkthrough is converted into a video, that video should be no longer than 5 minutes.
+
+**Walkthrough structure**
+
+Walkthroughs are split into three major sections:
+
+1. What we’re about to do.
+2. The steps we need to do.
+3. Summary of what we just did, and potential next steps.
+
+#### Conceptual articles
+
+Articles are written with the intent to inform and explain something. These articles don’t contain any steps or actions that the reader has to perform _right now_.
+
+These articles are vastly different in tone when compared to walkthroughs. Some topics and concepts can be challenging to understand, so creative writing and interesting diagrams are highly sought-after for these articles. Whatever writers can do to make a subject more understandable, the better.
+
+**Article goals**
+
+Use the following goals when writing conceptual articles:
+
+| Goal      | Keyword                | Explanation                                                                      |
+| --------- | ---------------------- | -------------------------------------------------------------------------------- |
+| Audience  | Knowledgeable          | Requires a certain amount of focus to understand.                                |
+| Formality | Neutral                | Slang is restricted, but standard casual expressions are allowed.                |
+| Domain    | Any                    | Usually technical, but depends on the article.                                   |
+| Tone      | Confident and friendly | The reader must feel confident that the writer knows what they’re talking about. |
+| Intent    | Describe               | Tell the reader why something does the thing that it does, or why it exists.     |
+
+**Article structure**
+
+Articles are separated into five major sections:
+
+1. Introduction to the thing we’re about to explain.
+2. What the thing is.
+3. Why it’s essential.
+4. What other topics it relates to.
+5. Summary review of what we just read.
+
+#### Tutorials
+
+When writing a tutorial, you’re teaching a reader how to achieve a complex end-goal. Tutorials are a mix of walkthroughs and conceptual articles. Most tutorials will span several pages, and contain multiple walkthroughs within them.
+
+Take the hypothetical tutorial _Get up and running with Filecoin_, for example. This tutorial will likely have the following pages:
+
+1. A brief introduction to what Filecoin is.
+2. Choose and install a command line client.
+3. Understanding storage deals.
+4. Import and store a file.
+
+Pages `1` and `3` are conceptual articles, describing particular design patterns and ideas to the reader. All the other pages are walkthroughs instructing the user how to perform one specific action.
+
+When designing a tutorial, keep in mind the walkthroughs and articles that already exist, and note down any additional content items that would need to be completed before creating the tutorial.
+
+### Grammar and formatting
+
+Here are some language-specific rules that the Filecoin documentation follows. If you use a writing service like [Grammarly](https://www.grammarly.com/), most of these rules are turned on by default.
+
+#### American English
+
+While Filecoin is a global project, the fact is that American English is the most commonly used _style_ of English today. With that in mind, when writing content for the Filecoin project, use American English spelling. The basic rules for converting other styles of English into American English are:
+
+1. Swap the `s` for a `z` in words like _categorize_ and _pluralize_.
+2. Remove the `u` from words like _color_ and _honor_.
+3. Swap `tre` for `ter` in words like _center_.
+
+#### The Oxford comma
+
+In a list of three or more items, follow each item except the last with a comma `,`:
+
+| Use | Don’t use |
+| ----------------------------- | ---------------------------- |
+| One, two, three, and four. | One, two, three and four. |
+| Henry, Elizabeth, and George. | Henry, Elizabeth and George. |
+
+#### References to Filecoin
+
+As a proper noun, the name “Filecoin” (capitalized) should be used only to refer to the overarching project, to the protocol, or to the project’s canonical network:
+
+> Filecoin \[the project] has attracted contributors from around the globe! Filecoin \[the protocol] rewards contributions of data storage instead of computation! Filecoin \[the network] is currently storing 50 PiB of data!
+
+The name can also be used as an adjective:
+
+> The Filecoin ecosystem is thriving! I love contributing to Filecoin documentation!
+
+When referring to the token used as Filecoin’s currency, the name `FIL` is preferred. It is alternatively denoted by the Unicode symbol for an integral with a double stroke ⨎:
+
+* Unit prefix: **100 FIL**.
+* Symbol prefix: **⨎ 100**.
+
+The smallest and most common denomination of FIL is the `attoFIL` (10^-18 FIL).
+
+> The collateral for this storage deal is 5 FIL. I generated ⨎100 as a storage provider last month!
+
+Examples of discouraged usage:
+
+> Filecoin rewards storage providers with Filecoin. There are many ways to participate in the filecoin community. My wallet has thirty filecoins.
+
+Consistency in the usage of these terms helps keep these various concepts distinct.
+
+#### References to Lotus
+
+Lotus is the main implementation of Filecoin. As such, it is frequently referenced in the Filecoin documentation. When referring to the Lotus implementation, use a capital _L_. A lowercase _l_ should only be used when referring to the Lotus executable commands such as `lotus daemon`. Lotus executable commands should always be within code blocks:
+
+````markdown
+1. Start the Lotus daemon:
+
+ ```shell
+ lotus daemon
+ ```
+
+2. After your Lotus daemon has been running for a few minutes, use `lotus` to check the number of other peers that it is connected to in the Filecoin network:
+
+ ```shell
+ lotus net peers
+ ```
+````
+
+#### Acronyms
+
+If you have to use an acronym, spell the full phrase first and include the acronym in parentheses `()` the first time it is used in each document. Exception: This generally isn’t necessary for commonly-encountered acronyms like _IPFS_, unless writing for a stand-alone article that may not be presented alongside project documentation.
+
+> Virtual Machine (VM), Decentralized Web (DWeb).
+
+### Formatting
+
+How the Markdown syntax looks, and code formatting rules to follow.
+
+#### Syntax
+
+The Filecoin Docs project follows the _GitHub Flavored Markdown_ syntax for markdown. This way, all articles display properly within GitHub itself. This gives readers the option to view articles on [the docs website](https://docs.filecoin.io/) or [its GitHub repo](https://github.com/filecoin-project/filecoin-docs).
+
+#### Rules
+
+We use the rules set out in the [VSCode Markdownlint](https://github.com/DavidAnson/vscode-markdownlint) extension. You can import these rules into any text editor like Vim or Sublime. All rules are listed [within the Markdownlint repository](https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md).
+
+We highly recommend installing [VSCode](https://code.visualstudio.com/) with the [Markdownlint](https://github.com/DavidAnson/vscode-markdownlint) extension to help with your writing. The extension shows warnings within your markdown whenever your copy doesn’t conform to a rule.
+
+### Style
+
+The following rules explain how we organize and structure our writing. The rules outlined here are in addition to the [rules](https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md) found within the [Markdownlinter extension](https://github.com/DavidAnson/vscode-markdownlint).
+
+#### Text
+
+The following rules apply to editing and styling text.
+
+**Titles**
+
+1. All titles follow sentence structure. Only _names_ and _places_ are capitalized, along with the first letter of the title. All other letters are lower-case:
+
+```markdown
+## This is a title
+
+### Only capitalize names and places
+
+### The capital city of France is Paris
+```
+
+
+2. Every article starts with a _front-matter_ title and description:
+
+```markdown
+---
+title: Example article
+description: This is a brief description that shows up in link teasers in services like Twitter and Slack.
+---
+
+## This is a subtitle
+
+Example body text.
+```
+
+In the above example `title:` serves as a `<h1>` or `#` tag. There is only ever one title of this level in each article.
+
+3. Titles do not contain punctuation. If you have a question within your title, rephrase it as a statement:
+
+```markdown
+<!-- Don't use: -->
+## What is Filecoin?
+
+<!-- Instead use: -->
+## Filecoin explained
+```
+
+**Bold text**
+
+Double asterisks `**` are used to define **boldface** text. Use bold text when the reader must interact with something displayed as text: buttons, hyperlinks, images with text in them, window names, and icons.
+
+```markdown
+In the **Login** window, enter your email into the **Username** field and click **Sign in**.
+```
+
+**Italics**
+
+Underscores `_` are used to define _italic_ text. Style the names of things in italics, except input fields or buttons:
+
+```markdown
+Here are some American things:
+
+- The _Spirit of St Louis_.
+- The _White House_.
+- The United States _Declaration of Independence_.
+
+Try entering them into the **American** field and clicking **Accept**.
+```
+
+Quotes or sections of quoted text are styled in italics and surrounded by double quotes `"`:
+
+```markdown
+In the wise words of Winnie the Pooh _"People say nothing is impossible, but I do nothing every day."_
+```
+
+**Code blocks**
+
+Tag code blocks with the syntax of the code they are presenting:
+
+````markdown
+ ```javascript
+ console.log(error);
+ ```
+````
+
+Output from command-line actions can be displayed by adding another codeblock directly after the input codeblock. Here’s an example telling the user to run `go version` and then the output of that command in a separate codeblock immediately after the first:
+
+````markdown
+ ```shell
+ go version
+ ```
+
+ ```plaintext
+ go version go1.19.7 darwin/arm64
+ ```
+````
+
+Command-line examples can be truncated with three periods `...` to remove extraneous information:
+
+````markdown
+ ```shell
+ lotus-miner info
+ ```
+
+ ```shell
+ Miner: t0103
+ Sector Size: 16.0 MiB
+ ...
+ Sectors: map[Committing:0 Proving:0 Total:0]
+ ```
+````
+
+**Inline code tags**
+
+Surround directories, file names, and version numbers between inline code tags `` ` ``.
+
+```markdown
+Version `1.2.0` of the program is stored in `~/code/examples`. Open `exporter.exe` to run the program.
+```
+
+**List items**
+
+All list items follow sentence structure. Only _names_ and _places_ are capitalized, along with the first letter of the list item. All other letters are lowercase:
+
+1. Never leave Nottingham without a sandwich.
+2. Brian May played guitar for Queen.
+3. Oranges.
+
+List items end with a period `.`, or a colon `:` if the list item has a sub-list:
+
+1. Charles Dickens novels:
+ 1. Oliver Twist.
+   2. Nicholas Nickleby.
+ 3. David Copperfield.
+2. J.R.R. Tolkien books:
+ 1. The Hobbit.
+ 2. Silmarillion.
+ 3. Letters from Father Christmas.
+
+**Unordered lists**
+
+Use the dash character `-` for un-numbered list items:
+
+```markdown
+- An apple.
+- Three oranges.
+- As many lemons as you can carry.
+- Half a lime.
+```
+
+**Special characters**
+
+Whenever possible, spell out the name of the special character, followed by an example of the character itself within a code block.
+
+```markdown
+Use the dollar sign `$` to enter debug-mode.
+```
+
+**Keyboard shortcuts**
+
+When instructing the reader to use a keyboard shortcut, surround individual keys in code tags:
+
+```markdown
+Press `ctrl` + `c` to copy the highlighted text.
+```
+
+The plus symbol `+` stays outside of the code tags.
+
+#### Images
+
+The following rules and guidelines define how to use and store images.
+
+**Alt text**
+
+All images contain alt text so that screen-reading programs can describe the image to users with limited sight:
+
+```markdown
+![Screenshot of the Filecoin homepage with the main navigation menu highlighted.](path/to/image.png)
+```
diff --git a/basics/the-blockchain/README.md b/basics/the-blockchain/README.md
new file mode 100644
index 000000000..fe8272fd4
--- /dev/null
+++ b/basics/the-blockchain/README.md
@@ -0,0 +1,6 @@
+---
+description: This section covers the basic concepts surrounding the Filecoin blockchain.
+---
+
+# The blockchain
+
diff --git a/basics/the-blockchain/actors.md b/basics/the-blockchain/actors.md
new file mode 100644
index 000000000..614bdfd49
--- /dev/null
+++ b/basics/the-blockchain/actors.md
@@ -0,0 +1,129 @@
+---
+description: >-
+ Actors are smart contracts that run on the Filecoin virtual machine (FVM), and
+ are used to manage, query and update the state of the Filecoin network. Smart
+ contracts are small, self-executing blocks
+---
+
+# Actors
+
+For those familiar with the Ethereum virtual machine (EVM), _actors_ work similarly to [smart contracts](broken-reference). In the Filecoin network, there are two types of actors:
+
+* [_Built-in actors_](actors.md#built-in-actors): Hardcoded programs, written ahead of time by network engineers that manage and orchestrate key subprocesses and subsystems in the Filecoin network.
+* [_User actors_](actors.md#user-actors-smart-contracts): Code implemented by **any developer** that interacts with the Filecoin Virtual Machine (FVM).
+
+## Built-in actors
+
+Built-in actors are how the Filecoin network manages and updates _global state_. The _global state_ of the network at a given epoch can be thought of as the set of blocks agreed upon via network consensus in that epoch. This global state is represented as a _state tree_, which maps an actor to an _actor state_. An _actor state_ describes the current conditions for an individual actor, such as its FIL balance and its nonce. In Filecoin, actors trigger a _state transition_ by sending a _message_. Each block in the chain can be thought of as a **proposed** global state, where the block selected by network consensus sets the **new** global state. Each block contains a series of messages, and a checkpoint of the current global state after the application of those messages. The Filecoin Virtual Machine (FVM) is the Filecoin network component that is in charge of execution of all actor code.
+
+A basic example of how actors are used in Filecoin is the process by which storage providers prove storage and are subsequently rewarded. The process is as follows:
+
+1. The [`StorageMinerActor`](actors.md#storagemineractor) processes proof of storage from a storage provider.
+2. The storage provider is awarded storage power based on whether the proof is valid or not.
+3. The [`StoragePowerActor`](actors.md#storagepoweractor) accounts for the storage power.
+4. During block validation, the [`StoragePowerActor`](actors.md#storagepoweractor)’s state, which includes information on storage power allocated to each storage provider, is read.
+5. Using the state information, the consensus mechanism randomly awards blocks to the storage providers with the most power, and the [`RewardActor`](actors.md#rewardactor) sends FIL to storage providers.
+
+### Blocks
+
+Each block in the Filecoin chain contains:
+
+* Inline data such as current block height.
+* A pointer to the current state tree.
+* A pointer to the set of messages that, when applied to the network, generated the current state tree.
+
+### State tree
+
+A [Merkle Directed Acyclic Graph (Merkle DAG)](../../reference/general/glossary.md#merkle-directed-acyclic-graph) is used to map the state tree and the set of messages. Nodes in the state tree contain information on:
+
+* Actors, like FIL balance, nonce, and a pointer (CID) to actor state data.
+* Messages in the current block.
+
+### Messages
+
+Like the state tree, a Merkle Directed Acyclic Graph (Merkle DAG) is used to map the set of messages for a given block. Nodes in the messages map contain information on:
+
+* The actor the message was sent to
+* The actor that sent the message
+* Target method to call on actor being sent the message
+* A cryptographic signature for verification
+* The amount of FIL transferred between actors
+
+### Actor code
+
+The code that defines an actor in the Filecoin network is separated into different methods. Messages sent to an actor contain information on which method(s) to call, and the input parameters for those methods. Additionally, actor code interacts with a _runtime_ object, which contains information on the general state of the network, such as the current epoch, and cryptographic signatures and proof validations. Like smart contracts in other blockchains, actors must pay a _gas fee_, which is some predetermined amount of FIL to offset the cost (network resources used, etc.) of a transaction. Every actor has a Filecoin balance attributed to it, a state pointer, a code which tells the system what type of actor it is, and a nonce, which tracks the number of messages sent by this actor.
+
+### Types of built-in actors
+
+The 11 different types of built-in actors are as follows:
+
+* [CronActor](actors.md#cronactor)
+* [InitActor](actors.md#initactor)
+* [AccountActor](actors.md#accountactor)
+* [RewardActor](actors.md#rewardactor)
+* [StorageMarketActor](actors.md#storagemarketactor)
+* [StorageMinerActor](actors.md#storagemineractor)
+* [MultisigActor](actors.md#multisigactor)
+* [PaymentChannelActor](actors.md#paymentchannelactor)
+* [StoragePowerActor](actors.md#storagepoweractor)
+* [VerifiedRegistryActor](actors.md#verifiedregistryactor)
+* [SystemActor](actors.md#systemactor)
+
+#### CronActor
+
+The `CronActor` sends messages to the `StoragePowerActor` and `StorageMarketActor` at the end of each epoch. The messages sent by `CronActor` indicate to `StoragePowerActor` and `StorageMarketActor` how they should maintain internal state and process deferred events. This system actor is instantiated in the genesis block, and interacts directly with the FVM.
+
+#### InitActor
+
+The `InitActor` can initialize new actors on the Filecoin network. This system actor is instantiated in the genesis block, and maintains a table resolving a public key and temporary actor addresses to their canonical ID addresses. The `InitActor` interacts directly with the FVM.
+
+#### AccountActor
+
+The `AccountActor` is responsible for user accounts. Account actors are not created by the `InitActor`, but by sending a message to a public-key style address. The account actor updates the state tree with the new actor address, and interacts directly with the FVM.
+
+#### RewardActor
+
+The `RewardActor` manages unminted Filecoin tokens, and distributes rewards directly to miner actors, where they are locked for vesting. The reward value used for the current epoch is updated at the end of an epoch. The `RewardActor` interacts directly with the FVM.
+
+#### StorageMarketActor
+
+The `StorageMarketActor` is responsible for processing and managing on-chain deals. This is also the entry point of all storage deals and data into the system. This actor keeps track of storage deals, and the locked balances of both the client storing data and the storage provider. When a deal is posted on chain through the `StorageMarketActor`, the actor will first check if both transacting parties have sufficient balances locked up and include the deal on chain. Additionally, the `StorageMarketActor` holds _Storage Deal Collateral_ provided by the storage provider to collateralize deals. This collateral is returned to the storage provider when all deals in the sector successfully conclude. This actor does not interact directly with the FVM.
+
+#### StorageMinerActor
+
+The `StorageMinerActor` is created by the `StoragePowerActor`, and is responsible for storage mining operations and the collection of mining proofs. This actor is a key part of the Filecoin storage mining subsystem, which ensures a storage miner can effectively commit storage to the Filecoin network, and handles the following:
+
+* Committing new storage
+* Continuously proving storage
+* Declaring storage faults
+* Recovering from storage faults
+
+This actor does not interact directly with the FVM.
+
+#### MultisigActor
+
+The `MultisigActor` is responsible for dealing with operations involving the Filecoin wallet, and represents a group of transaction signers, with a maximum of 256. Signers may be external users or the `MultisigActor` itself. This actor does not interact directly with the FVM.
+
+#### PaymentChannelActor
+
+The `PaymentChannelActor` creates and manages _payment channels_, a mechanism for off-chain microtransactions for Filecoin dApps to be reconciled on-chain at a later time with less overhead than a standard on-chain transaction, and no gas costs. Payment channels are uni-directional and can be funded by adding to their balance. To create a payment channel and deposit funds, a user calls the `PaymentChannelActor`. This actor does not interact directly with the FVM.
+
+#### StoragePowerActor
+
+The `StoragePowerActor` is responsible for keeping track of the storage power allocated to each storage miner, and has the ability to create a `StorageMinerActor`. This actor does not interact directly with the FVM.
+
+#### VerifiedRegistryActor
+
+The `VerifiedRegistryActor` is responsible for managing Filecoin Plus (Fil+) clients. This actor can add a verified client to the Fil+ program, remove and reclaim expired DataCap allocations and manage claims. This actor does not interact directly with the FVM.
+
+#### SystemActor
+
+For more information on `SystemActor`, see the [source code](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/system/system\_actor.go).
+
+## User actors (smart contracts)
+
+A _user actor_ is code defined by **any developer** that can interact with the FVM, otherwise known as a _smart contract_.
+
+A _smart contract_ is a small, self-executing block of custom code that runs on other blockchains, like Ethereum. In the Filecoin network, the term is a synonym for [_user actor_](actors.md#user-actors-smart-contracts). You may see the term _smart contract_ used in tandem with _user actor_, but there is no difference between the two.
+
+With the FVM, actors can be written in Solidity. In future updates, any language that compiles to WASM will be supported. With user actors, users can create and enforce custom rules for storing and accessing data on the network. The FVM is responsible for actors and ensuring that they are executed correctly and securely.
diff --git a/basics/the-blockchain/addresses.md b/basics/the-blockchain/addresses.md
new file mode 100644
index 000000000..ab128ff8a
--- /dev/null
+++ b/basics/the-blockchain/addresses.md
@@ -0,0 +1,71 @@
+---
+description: >-
+ A Filecoin address is an identifier that refers to an actor in the Filecoin
+ state. All actors (miner actors, the storage market actor, account actors)
+ have an address.
+---
+
+# Addresses
+
+All Filecoin addresses begin with an `f` to indicate the network (Filecoin), followed by any of the address prefix numbers (`0`, `1`, `2`, `3`, `4`) to indicate the address type. There are five address types:
+
+| Address prefix | Description |
+| -------------- | ----------- |
+| `0` | An ID address. |
+| `1` | A SECP256K1 public key address. |
+| `2` | An actor address. |
+| `3` | A BLS public key address. |
+| `4` | Extensible, user-defined actor addresses. `f410` addresses refer to the Ethereum-compatible address space; each `f410` address is equivalent to an `0x` address. |
+
+Each of the address types is described below.
+
+## Actor IDs
+
+All actors have a short integer assigned to them by `InitActor`, a unique actor that can create _new_ actors. This integer that gets assigned is the ID of that actor. An _ID address_ is an actor’s ID prefixed with the network identifier and the address type.
+
+Actor ID addresses are not _robust_, in the sense that they do depend on chain state, and are defined on-chain by the `InitActor`. Additionally, actor IDs can change for a brief time after creation if the same ID is assigned to different actors on different forks. Actor ID addresses are similar to monotonically increasing numeric primary keys in a relational database. So, when a chain reorganization occurs (similar to a rollback in a SQL database), you can refer to the same ID for different rows. The expected consensus algorithm will resolve the conflict. Once the state that defines a new ID reaches finality, no changes can occur, and the ID is bound to that actor forever.
+
+For example, the mainnet burn account ID address, `f099`, is structured as follows:
+
+```plaintext
+ Address type
+ |
+f 0 9 9
+| |
+| Actor ID
+|
+Network identifier
+```
+
+ID addresses are often referred to by their shorthand `f0`.
+
+## Public keys
+
+Actors managed directly by users, like accounts, are derived from a public-private key pair. If you have access to a private key, you can sign messages sent from that actor. The public key is used to derive an address for the actor. Public key addresses are referred to as _robust addresses_ as they do not depend on the Filecoin chain state.
+
+Public key addresses allow devices, like hardware wallets, to derive a valid Filecoin address for your account using just the public key. The device doesn’t need to ask a remote node what your ID address is. Public key addresses provide a concise, safe, human-readable way to reference actors before the chain state is final. ID addresses are used as a space-efficient way to identify actors in the Filecoin chain state, where every byte matters.
+
+Filecoin supports two types of public key addresses:
+
+* [`secp256k1` addresses](https://en.bitcoin.it/wiki/Secp256k1) that begin with the prefix `f1`.
+* [BLS addresses](https://en.wikipedia.org/wiki/BLS\_digital\_signature) that begin with the prefix `f3`.
+
+For BLS addresses, Filecoin uses `curve bls12-381` for BLS signatures, which is a pair of two related curves, `G1` and `G2`.
+
+Filecoin uses `G1` for public keys, as G1 allows for a smaller representation of public keys, and `G2` for signatures. This implements the same design as ETH2, but contrasts with Zcash, which has signatures on `G1` and public keys on `G2`. However, unlike ETH2, which stores private keys in big-endian order, Filecoin stores and interprets private keys in little-endian order.
+
+Public key addresses are often referred to by their shorthand, `f1` or `f3`.
+
+## Actors
+
+Actor addresses provide a way to create robust addresses for actors not associated with a public key. They are generated by taking a `sha256` hash of the output of the account creation. The ZH storage provider has the actor address `f2plku564ddywnmb5b2ky7dhk4mb6uacsxuuev3pi` and the ID address `f01248`.
+
+Actor addresses are often referred to by their shorthand, `f2`.
+
+## Extensible user-defined actors
+
+Filecoin supports extensible, user-defined actor addresses through the `f4` address class, introduced in [Filecoin Improvement Proposal (FIP) 0048](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0048.md). The `f4` address class provides the following benefits to the network:
+
+* A predictable addressing scheme to support interactions with addresses that do not yet exist on-chain.
+* User-defined, custom addressing systems without extensive changes and network upgrades.
+* Support for native addressing schemes from foreign runtimes such as the EVM.
+
+An `f4` address is structured as `f4<address-manager-actor-id>f<new-actor-id>`, where `<address-manager-actor-id>` is the actor ID of the _address manager_, and `<new-actor-id>` is the arbitrary actor ID chosen by that actor. An _address manager_ is an actor that can create new actors and assign an `f4` address to the new actor.
+
+Currently, per [FIP 0048](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0048.md), `f4` addresses may only be assigned by and in association with specific, built-in actors called _address managers_. Once users are able to deploy custom WebAssembly actors, this restriction will likely be relaxed, in a future FIP.
+
+As an example: suppose an address manager has an actor ID (an `f0` address) `123`, and that address manager creates a new actor. Then, the `f4` address of the actor created by the address manager is `f4123fa3491xyz`, where `f4` is the address class, `123` is the actor ID of the address manager, `f` is a separator, and `a3491xyz` is the arbitrary `<new-actor-id>` chosen by that actor.
diff --git a/basics/the-blockchain/blocks-and-tipsets.md b/basics/the-blockchain/blocks-and-tipsets.md
new file mode 100644
index 000000000..1bd725e3a
--- /dev/null
+++ b/basics/the-blockchain/blocks-and-tipsets.md
@@ -0,0 +1,64 @@
+---
+description: >-
+ Like many other blockchains, blocks are a fundamental concept in Filecoin.
+ Unlike other blockchains, Filecoin is a chain of groups of blocks called
+ tipsets, rather than a chain of individual blocks.
+---
+
+# Blocks and tipsets
+
+## Blocks
+
+In Filecoin, a block consists of:
+
+* A block header
+* A list of _messages_ contained in the block
+* A signed copy of each message listed
+
+Every block refers to at least one _parent block_; that is, a block produced in a prior epoch.
+
+A _message_ represents communication between two actors, and thus changes in network state. The messages are listed in their order of appearance, deduplicated and returned in canonical order of execution. So, in other words, a block describes all changes to the network state in a given epoch.
+
+### Blocktime
+
+Blocktime is a concept that represents the average time it takes to mine or produce a new block on a blockchain. In Ethereum, for example, the blocktime is approximately 15 seconds on average, meaning that a new block is added to the Ethereum blockchain roughly every 15 seconds.
+
+In the Filecoin network, storage providers compete to produce blocks by providing storage capacity and participating in the consensus protocol. The block time determines how frequently new blocks are added to the blockchain, which impacts the overall speed and responsiveness of the network.
+
+Filecoin has a block time of 30 seconds, and this duration was chosen for two main reasons:
+
+* **Hardware requirements**: If the block time were faster while maintaining the same gas limit or the number of messages per block, it would lead to increased hardware requirements. This includes the need for more storage space to accommodate the larger chain data resulting from more frequent block production.
+* **Storage provider operations**: The block time also takes into account the various operations that occur during that duration on the storage provider (SP) side. As SPs generate new blocks, the 30 second block time allows for the necessary processes and computations to be carried out effectively. If the blocktime were shorter, SPs would encounter significantly more blocktime failures.
+
+By considering these factors, the Filecoin network has established a block time of 30 seconds, balancing the need for efficient operations and hardware requirements.
+
+## Tipsets
+
+As described in [Consensus](consensus.md), multiple potential block producers may be elected via Expected Consensus (EC) to create a block in each epoch, which means that more than one valid block may be produced in a given epoch. All valid blocks with the same height and same parent block are assembled into a group called a _tipset_.
+
+### Benefits of tipsets
+
+In other blockchains, blocks are used as the fundamental representation of network state; that is, the overall status of each participant in the network at a given time. However, this structure has the following disadvantages:
+
+* Potential block producers may be hobbled by network latency.
+* Not all valid work is rewarded.
+* Decentralization and collaboration in block production is not incentivized.
+
+Because Filecoin is a chain of tipsets rather than individual blocks, the network enjoys the following benefits:
+
+* All valid blocks generated in a given round are used to determine network state, increasing network efficiency and throughput.
+* All valid work is rewarded (that is, all validated block producers in an epoch receive a block reward).
+* All potential block producers are incentivized to produce blocks, disincentivizing centralization and promoting collaboration.
+* Because all blocks in a tipset have the same height and parent, Filecoin is able to achieve rapid convergence in the case of forks.
+
+In summary, blocks, which contain actor messages, are grouped into tipsets in each epoch, which can be thought of as the overall description of network state for a given epoch.
+
+### Tipsets in the Ethereum JSON-RPC
+
+Wherever you see the term _block_ in the Ethereum JSON-RPC, you should mentally read _tipset_. Before the inclusion of the Filecoin EVM runtime, there was no single hash referring to a tipset. A tipset ID was the concatenation of block CIDs, which led to a variable length ID, and poor user experience.
+
+With the Ethereum JSON-RPC, we introduced the concept of the _tipset CID_ for the first time. It is calculated by hashing the former _tipset key_ using a blake-256 hash. Therefore, when you see the term:
+
+* _block hash_, think _tipset hash_.
+* _block height_, think _tipset epoch_.
+* _block messages_, think _messages in all blocks in a tipset, in their order of appearance, deduplicated and returned in canonical order of execution_.
diff --git a/basics/the-blockchain/consensus.md b/basics/the-blockchain/consensus.md
new file mode 100644
index 000000000..3680137ed
--- /dev/null
+++ b/basics/the-blockchain/consensus.md
@@ -0,0 +1,38 @@
+---
+description: >-
+ In the Filecoin blockchain, network consensus is achieved using the Expected
+ Consensus (EC) algorithm, a secret, fair and verifiable consensus protocol
+ used by the network to agree on the chain state.
+---
+
+# Consensus
+
+## Overview
+
+In the Filecoin blockchain, network _consensus_ is achieved using the Expected Consensus (EC) algorithm, a probabilistic, _Byzantine fault-tolerant_ consensus protocol. At a high-level, EC achieves consensus by running a secret, fair and verifiable _leader election_ at every [epoch](../../reference/general/glossary.md#epoch) where a set number of participants may become eligible to submit a block to the chain based on fair and verifiable criteria.
+
+## Properties
+
+Expected Consensus (EC) has the following properties:
+
+* Each epoch has potentially multiple elected leaders who may propose a block.
+* A winner is elected randomly from a set of network participants weighted according to the respective storage power they contribute to the Filecoin network.
+* All blocks proposed are grouped together in a _tipset_, from which the final chain is selected.
+* A block producer can be verified by any participant in the network.
+* The identity of a block producer is anonymous until they release their block to the network.
+
+## Steps
+
+In summary, EC involves the following steps at each _epoch_:
+
+1. A storage provider checks to see if they are elected to propose a block by generating an _election proof_.
+2. Zero, one or multiple storage providers may be elected to propose a block. This does not mean that an elected participant is guaranteed to be able to submit a block. In the case where:
+ * **No storage providers are elected to propose a block in a given epoch**, a new election is run in the next epoch to ensure that the network remains live.
+ * **One or more storage providers are elected to propose a block in a given epoch**, each must generate a _WinningPoSt proof-of-storage_ to be eligible to actually submit a block.
+3. Each potential block producer elected generates a storage proof using [WinningPoSt](../../reference/general/glossary.md#winning-proof-of-spacetime-winningpost) for a randomly selected [_sector_](../../reference/general/glossary.md#sector) within a short window of time. Potential block producers that fail this step are not eligible to produce a block. In this step, the following could occur:
+ * **All potential block producers fail WinningPoSt**, in which case EC returns to step 1 (described above).
+   * **One or more potential block producers passes WinningPoSt**, which means they are eligible to submit that block to the epoch’s tipset.
+4. Blocks generated by block producers are grouped into a [tipset](../../reference/general/glossary.md#tipset).
+5. The tipset that reflects the biggest amount of committed storage on the network is selected.
+6. Using the selected tipset, the chain state is propagated.
+7. EC returns to step 1 in the next epoch.
diff --git a/basics/the-blockchain/drand.md b/basics/the-blockchain/drand.md
new file mode 100644
index 000000000..8cb0ed001
--- /dev/null
+++ b/basics/the-blockchain/drand.md
@@ -0,0 +1,65 @@
+---
+description: >-
+ Drand, pronounced dee-rand, is a distributed randomness beacon daemon written
+ in Golang.
+---
+
+# Drand
+
+This page covers how Drand is used within the Filecoin network. For more information on Drand generally, [take a look at the project’s documentation](https://drand.love/about/).
+
+## Randomness outputs
+
+By polling the appropriate endpoint, a Filecoin node will get back a Drand value formatted as follows:
+
+```json
+{
+ "round": 367,
+ "signature": "b62dd642e939191af1f9e15bef0f0b0e9562a5f570a12a231864afe468377e2a6424a92ccfc34ef1471cbd58c37c6b020cf75ce9446d2aa1252a090250b2b1441f8a2a0d22208dcc09332eaa0143c4a508be13de63978dbed273e3b9813130d5",
+ "previous_signature": "afc545efb57f591dbdf833c339b3369f569566a93e49578db46b6586299422483b7a2d595814046e2847494b401650a0050981e716e531b6f4b620909c2bf1476fd82cf788a110becbc77e55746a7cccd47fb171e8ae2eea2a22fcc6a512486d"
+}
+```
+
+* `signature`: the threshold BLS signature over the previous signature value (`previous_signature`) and the current round number (`round`).
+* `previous_signature`: the threshold BLS signature from the previous Drand round.
+* `round`: the index of randomness in the sequence of all random values produced by this Drand network.
+
+The message signed is the concatenation of the round number treated as a uint64 and the previous signature. At the moment, Drand uses BLS signatures on the BLS12-381 curve with the latest v7 RFC of hash-to-curve, and the signature is made over G1.
+
+## Polling the network
+
+Filecoin nodes fetch the Drand entry from the distribution network of the selected Drand network.
+
+Drand distributes randomness using multiple distribution channels such as HTTP servers, S3 buckets, gossiping, etc. Simply put, the Drand nodes themselves will not be directly accessible by consumers, rather, highly-available relays will be set up to serve Drand values over these distribution channels.
+
+On initialization, Filecoin initializes a Drand client with chain info that contains the following information:
+
+* Period: the period of time between each Drand randomness generation.
+* GenesisTime: at which the first round in the Drand randomness chain is created.
+* PublicKey: the public key to verify randomness.
+* GenesisSeed: the seed that has been used for creating the first randomness.
+
+It is possible to simply store the hash of this chain info and to retrieve the contents from the Drand distribution network as well on the `/info` endpoint.
+
+Thereafter, the Filecoin client can call Drand’s endpoints:
+
+* `/public/latest` to get the latest randomness value produced by the beacon.
+* `/public/{round}` to get the randomness value produced by the beacon at a given round.
+
+## Using Drand
+
+Drand is used as a randomness beacon for leader election in Filecoin. While Drand returns multiple values with every call to the beacon (see above), Filecoin blocks need only store a subset of these in order to track a full Drand chain. This information can then be mixed with on-chain data for use in Filecoin.
+
+## Edge cases and outages
+
+Any Drand beacon outage will effectively halt Filecoin block production. Given that new randomness is not produced, Filecoin miners cannot generate new blocks. Specifically, any call to the Drand network for a new randomness entry during an outage should be blocking in Filecoin.
+
+After a beacon downtime, Drand nodes will work to quickly catch up to the current round. In this way, the above time-to-round mapping in Drand used by Filecoin remains an invariant after this catch-up following downtime.
+
+While Filecoin miners were not able to mine during the Drand outage, they will quickly be able to run leader election thereafter, given a rapid production of Drand values. We call this a _catch up_ period.
+
+During the catch up period, Filecoin nodes will backdate their blocks in order to continue using the same time-to-round mapping to determine which Drand round should be integrated according to the time. Miners can then choose to publish their null blocks for the outage period, including the appropriate Drand entries throughout the blocks, per the time-to-round mapping. Or, as is more likely, try to craft valid blocks that might have been created during the outage.
+
+Based on the level of decentralization of the Filecoin network, we expect to see varying levels of miner collaboration during this period. This is because there are two incentives at play: trying to mine valid blocks from the outage period to collect block rewards, and not falling behind a heavier chain being mined by a majority of miners that may or may not have ignored a portion of these blocks.
+
+In any event, a heavier chain will emerge after the catch up period and mining can resume as normal.
diff --git a/basics/the-blockchain/proofs.md b/basics/the-blockchain/proofs.md
new file mode 100644
index 000000000..117147f51
--- /dev/null
+++ b/basics/the-blockchain/proofs.md
@@ -0,0 +1,80 @@
+---
+description: >-
+ In Filecoin cryptographic proving systems, often simply referred to as proofs,
+ are used to validate that a storage provider (SP) is properly storing data.
+---
+
+# Proofs
+
+Different blockchains use different cryptographic proving systems (proofs) based on the network’s specific purpose, goals, and functionality. Regardless of which method is used, proofs have the following in common:
+
+* All blockchain networks seek to achieve [_consensus_](consensus.md), and rely on proofs as part of this process.
+* Proofs incentivize network participants to behave in certain ways and allow the network to penalize participants that do not abide by network standards.
+* Proofs allow decentralized systems to agree on a network state without a central authority.
+
+Proof-of-Work and Proof-of-Stake are both fairly common proof methods:
+
+* **Proof-of-Work**: nodes in the network solve complex mathematical problems to validate transactions and create new blocks.
+* **Proof-of-Stake**: nodes in the network are chosen to validate transactions and create new blocks based on the amount of cryptocurrency they hold and “stake” in the network.
+
+The Filecoin network aims to provide useful, reliable storage to its participants. With a traditional centralized entity like a cloud storage provider, explicit trust is placed in the entity itself that the data will be stored in a way that meets some minimum set of standards such as security, scalability, retrievability, or replication. Because the Filecoin network is a decentralized network of storage providers (SPs) distributed across the globe, network participants need an automated, trustless, and decentralized way to validate that an SP is doing a good job of handling the data.
+
+In particular, the Filecoin proof process must verify the data was properly stored at the time of the initial request and is continuing to be stored based on the terms of the agreement between the client and the SP. In order for the proof processes to be robust, the process must:
+
+* Target a random part of the data.
+* Occur at a time interval such that it is not possible, profitable, or rational for an SP to discard and re-fetch the copy of data.
+
+In Filecoin, this process is known as _Proof-of-Storage_, and consists of two distinct types of proofs:
+
+* [Proof of Replication (PoRep)](https://docs.filecoin.io/basics/the-blockchain/proofs/#proof-of-replication-porep): a procedure used at the time of initial data storage to validate that an SP has _created and stored_ a unique copy of some piece of data.
+* [Proof of Spacetime (PoST)](https://docs.filecoin.io/basics/the-blockchain/proofs/#proof-of-spacetime-post): a procedure to validate that an SP is _continuing to store_ a unique copy of some piece of data.
+
+## Proof-of-Replication (PoRep)
+
+In the Filecoin storage lifecycle process, _Proof-of-Replication (PoRep)_ is used when an SP agrees to store data on behalf of a client and receives a piece of client data. In this process:
+
+1. The data is placed into a [sector](https://docs.filecoin.io/basics/the-blockchain/proofs/).
+2. The sector is sealed by the SP.
+3. A unique encoding, which serves as proof that the SP has replicated a copy of the data they agreed to store, is generated (described in [Sealing as proof](https://docs.filecoin.io/basics/the-blockchain/proofs/#sealing-as-proof)).
+4. The proof is compressed.
+5. The result of the compression is submitted to the network as certification of storage.
+
+### Sealing as proof
+
+The unique encoding created during the sealing process is generated using the following pieces of information:
+
+* The data being sealed.
+* The storage provider who seals the data.
+* The time at which the data was sealed.
+
+Because of the principles of cryptographic hashing, a new encoding will be generated if the data changes, the storage provider sealing the data changes, or the time of sealing changes. This encoding is unique and can be used to verify that a specific storage provider did, in fact, store a particular piece of client data at a specific time.
+
+## Proof-of-Spacetime (PoSt)
+
+After a storage provider has proved that they have replicated a copy of the data that they agreed to store, the SP must continue to prove to the network that:
+
+* They are still storing the requested data.
+* The data is available.
+* The data is still sealed.
+
+Because this method is concerned with proving that data is being stored in a particular _space_ for a particular period or at a particular _time_, it is called _Proof-of-Spacetime (PoSt)_. In Filecoin, the PoSt process is handled using two different sub-methods, each of which serves a different purpose:
+
+* [WinningPoSt](https://docs.filecoin.io/basics/the-blockchain/proofs/#winningpost) is used to prove that an SP selected using an election process has a replica of the data at the specific time that they were asked and is used in the block consensus process.
+* [WindowPoSt](https://docs.filecoin.io/basics/the-blockchain/proofs/#windowpost) is used to prove that, for any and all SPs in the network, a copy of the data that was agreed to be stored is being continuously maintained over time and is used to audit SPs continuously.
+
+### WinningPoSt
+
+_WinningPoSt_ is used to prove that an SP selected via election has a replica of the data at the specific time that they were asked and is specifically used in Filecoin to determine which SPs may add blocks to the Filecoin blockchain.
+
+At the beginning of each [epoch](https://docs.filecoin.io/basics/the-blockchain/proofs/), a small number of SPs are elected to mine new blocks using the [Expected Consensus algorithm](https://spec.filecoin.io/algorithms/expected\_consensus/), which guarantees that validators will be chosen based on a probability proportional to their [power](https://docs.filecoin.io/basics/the-blockchain/proofs/). Each of the SPs selected must submit a WinningPoSt, proof that they have a sealed copy of the data which they have included in their proposed block. The deadline to submit this proof is the end of the current epoch and was intentionally designed to be short, making it impossible for the SP to fabricate the proof. Successful submission grants the SP:
+
+* The [block reward](https://docs.filecoin.io/basics/the-blockchain/proofs/).
+* The opportunity to charge other nodes fees in order to include their messages in the block.
+
+If an SP misses the submission deadline, no penalty is incurred, but the SP misses the opportunity to mine a block and receive the block reward.
+
+### WindowPoSt
+
+_WindowPoSt_ is used to prove that, for any and all SPs in the network, a copy of the data that was agreed to be stored is being continuously maintained over time and is used to audit SPs continuously. In WindowPoSt, all SPs must demonstrate the availability of all sectors claimed every [proving period](https://docs.filecoin.io/basics/the-blockchain/proofs/). Sector availability is not proved individually; rather, SPs must prove a whole [partition](https://docs.filecoin.io/basics/the-blockchain/proofs/) at once, and each partition must be proved by its assigned deadline (a 30-minute interval in the proving period).
+
+The more sectors an SP has pledged to store, the more partitions of sectors the SP will need to prove per deadline. As this requires that the SP has access to sealed copies of each of the requested sectors, it makes it irrational for the SP to seal data every time they need to provide a WindowPoSt proof, thus ensuring that SPs on the network are continuously maintaining the data agreed to. Additionally, failure to submit WindowPoSt for a sector will result in the SPs’ pledge collateral being forfeited and their storage power being reduced.
diff --git a/basics/what-is-filecoin/blockchain.md b/basics/what-is-filecoin/blockchain.md
new file mode 100644
index 000000000..e03eb1987
--- /dev/null
+++ b/basics/what-is-filecoin/blockchain.md
@@ -0,0 +1,115 @@
+---
+description: >-
+ A blockchain is a distributed database that is shared among the nodes of a
+ computer network. This page covers how the Filecoin blockchain is designed,
+ and the various functions it has.
+---
+
+# Blockchain
+
+## Tipsets
+
+The Filecoin blockchain is a chain of tipsets rather than a chain of blocks. A tipset is a set of blocks with the same height and parent tipset. Therefore, multiple storage providers can produce blocks for each epoch to increase network throughput.
+
+Each tipset is assigned a weight, so the consensus protocol directs nodes to build on the heaviest chain. This provides a certain level of security to the Filecoin network by preventing a node from intentionally intervening with other nodes to produce valid blocks.
+
+## Actors
+
+An Actor in the Filecoin Blockchain is the equivalent of the smart contract in the Ethereum Virtual Machine. It is essentially an ‘object’ in the Filecoin network with a state and a set of methods that can be used to interact with it.
+
+### Built-in actors
+
+There are several built-in system actors that power the Filecoin network as the decentralized storage network.
+
+* System Actor - general system actor.
+* Init actor - initializes new actors and records the network name.
+* Cron Actor - a scheduler actor that runs critical functions at every epoch.
+* Account Actor - responsible for user accounts (non-singleton).
+* Reward Actor - managing block reward and token vesting (singleton).
+* Storage Miner Actor - manages storage mining operations and validates storage proofs.
+* Storage Power Actor - keeping track of the storage power allocated to each storage provider.
+* Storage Market Actor - managing storage deals.
+* Multisig Actor - responsible for operations involving the Filecoin multi-signature wallet.
+* Payment Channel Actor - set up and settle payment channel funds.
+* Datacap Actor - responsible for datacap token management.
+* Verified Registry Actor - responsible for managing verified clients.
+* Ethereum Address Manager (EAM) Actor - responsible for assigning all Ethereum-compatible addresses on the Filecoin network, including EVM smart contract addresses and Ethereum account addresses.
+* EVM Account Actor - a non-singleton built-in actor representing an external Ethereum identity backed by a secp256k1 key.
+
+### User-programmable actors
+
+Along with the maturity of FVM, developers can write actors and deploy them to the Filecoin network in the same way as other blockchains. Other blockchains refer to these programs as _smart contracts_. User-programmable actors can also interact with built-in actors using the exported API from built-in actors.
+
+You can check out this [talk on How Filecoin Actors Work](https://curriculum.pl-launchpad.io/curriculum/filecoin/protocol/#how-filecoin-actors-work--zenground0--protocol-labs) to learn more.
+
+## Distributed randomness
+
+Filecoin uses a distributed and publicly verifiable randomness beacon protocol, [Drand](https://drand.love), as the randomness beacon for leader election during [expected consensus](blockchain.md#expected-consensus) to produce blocks. This randomness guarantees that the leader election is secret, fair, and verifiable.
+
+## Nodes
+
+Nodes in the Filecoin network are primarily identified in terms of the services they provide to serve the Filecoin storage network, including chain verifier nodes, client nodes, storage provider nodes, and retrieval provider nodes. Any node participating in the Filecoin network should provide the chain verification service as a minimum.
+
+Filecoin is targeting multiple protocol implementations to guarantee the security and resilience of the Filecoin network. Currently, the actively maintained implementations are:
+
+* [Lotus](https://lotus.filecoin.io/)
+* [Venus](https://github.com/filecoin-project/venus)
+* [Forest](https://github.com/ChainSafe/forest)
+
+## Addresses
+
+In the Filecoin network, addresses are used to identify actors in the Filecoin state. The address encodes information about the corresponding actor, providing a robust address format that is easy to use and resistant to errors. There are five types of addresses in Filecoin. Mainnet addresses begin with the letter `f`, and Testnet addresses begin with the letter `t`.
+
+* `f0/t0`: an ID address for an actor in a more “human friendly” way. For instance, f0123261 is the ID for a storage provider.
+* `f1/t1`: a secp256k1 wallet address with encrypted key pair. Essentially, this is a wallet address generated from the secp256k1 public key.
+* `f2/t2`: an address that represents an actor (smart contract) and is assigned in a way that makes it safe to use during network forks.
+* `f3/t3`: a BLS wallet address generated from a BLS public encryption key.
+* `f4/t4`: the addresses which were created and assigned to user-defined actors by user-definable “address management” actors. This address can receive funds before an actor has been deployed to the address.
+* `f410/t410`: the address space managed by Ethereum Address Manager (EAM) built-in actor. The original Ethereum addresses can be cast as f410/t410 addresses and vice versa to enable existing Ethereum tools to interact seamlessly with the Filecoin network.
+
+## Consensus
+
+Let’s quickly cover how consensus works in the Filecoin network.
+
+### Expected consensus
+
+Expected consensus (EC) is the underlying consensus algorithm used by Filecoin. EC is a probabilistic Byzantine fault-tolerant consensus protocol that runs a leader election among a set of storage providers to submit a block every epoch. Like proof-of-stake, Filecoin uses proof-of-storage for the leader election, meaning the likelihood of being elected depends on how much provable storage power a miner contributes to the network. The storage power of the network is stored in the storage power table and managed by the Storage Power Actor.
+
+At a high level, the consensus process relies on [Drand](https://drand.love) to provide distributed and verifiable randomness to keep leader election secret, fair and verifiable. All the election participants and their power are drawn from the Power Table, which is calculated and maintained over time by the Storage Power Consensus subsystem. Eventually, EC takes all valid blocks produced in this epoch and uses a weighting function to select the chain with the highest weight to add blocks.
+
+### Block production process
+
+The process of producing a block for each epoch can be briefly described as follows:
+
+* Elect leaders from eligible miners.
+* Miners need to check if they are elected.
+* An elected miner gets the randomness value to generate WinningPoSt.
+* If all above is successful, miners build and propagate a block.
+* Verify whether a miner won the block and verify the leader election.
+* Eventually, select the heaviest chain to add blocks.
+
+### Finality
+
+EC enforces a version of soft finality whereby all miners at round `N` will reject all blocks that fork off before round `N - F`. `F` is set to `900`. This is important to enforce finality at no cost to chain availability and functionality.
+
+## Proofs
+
+As a decentralized storage network, Filecoin is built on the proof-of-storage in which miners contribute their vacant storage space to the network to store data and then provide proofs for the client to verify if their data has been stored throughout a period.
+
+### Proof of replication
+
+Using proof-of-replication (PoRep), storage providers prove that they have created a unique copy of the client’s data and stored it on behalf of the network.
+
+### Proof of spacetime
+
+Storage providers also need to continuously prove that they store clients’ data for the whole lifetime of the storage deal. There are two types of challenges as part of the proof-of-spacetime (PoSt) process:
+
+* Winning PoSt guarantees that the storage provider maintains a copy of data at a specific time.
+* Window PoSt is used as proof that a copy of the data has been continuously maintained over time.
+
+### Slashing
+
+If storage providers fail to provide reliable uptime or act maliciously against the network, they will be penalized by slashing. Filecoin implements two kinds of slashing:
+
+* Storage fault slashing to penalize storage providers for not being able to maintain healthy and reliable storage sectors for the network.
+* Consensus fault slashing to penalize storage providers that sabotage the liveness and security of the consensus process.
diff --git a/basics/what-is-filecoin/crypto-economics.md b/basics/what-is-filecoin/crypto-economics.md
new file mode 100644
index 000000000..9e0b7dea8
--- /dev/null
+++ b/basics/what-is-filecoin/crypto-economics.md
@@ -0,0 +1,38 @@
+---
+description: >-
+ Crypto-economics is the study of how cryptocurrency can incentivize usages of
+ a blockchain network. This page covers how Filecoin manages incentivization
+ within the network.
+---
+
+# Crypto-economics
+
+## Native currency
+
+The native currency of Filecoin, FIL, is a utility token used to incentivize persistent storage on the Filecoin network. Storage providers mine FIL by providing reliable storage service or committing storage capacity on the network. It has a maximum circulating supply of 2,000,000,000 FIL, meaning that no more than 2 billion Filecoin will ever be created.
+
+As a utility token that aligns participants’ incentives with the long-term growth of the network, Filecoin issuance is aligned with the overall provable utility of the network. The majority of Filecoin supply would only be minted if the network achieved growth and utility targets at scale.
+
+Specifically, Filecoin uses a dual minting model for block reward minting:
+
+## Baseline minting
+
+Up to 770M FIL tokens are minted based on the performance of the network. These tokens would only fully release if the Filecoin network reached a Yottabyte of storage capacity in under 20 years, estimated to be \~1000x larger than today’s cloud storage capacity.
+
+## Simple minting
+
+330M FIL tokens are released on a 6 year half-life based on time, meaning that 97% of these tokens will be released in approximately 30 years time.
+
+Additionally, 300M FIL tokens are held back in the mining reserve to incentivize future types of mining.
+
+## Vesting
+
+Mining rewards undergo a vesting schedule to encourage long-term network alignment. For example, 75% of block rewards earned by miners vest linearly over 180 days, while 25% are made immediately available to improve miner cash flow and profitability. And the remaining FIL tokens are vested to Protocol Labs teams and Filecoin Foundation over 6 years and SAFT investors over 3 years, as shown in the vesting table here.
+
+## Collateral and slashing
+
+To encourage good behavior from network participants, during block reward mining, storage providers must lock Filecoin tokens as pledge collateral for consensus security, storage reliability, and contract guarantees. Pledge collateral is determined by projected block rewards that a miner would earn. Collateral and all earned rewards by storage providers are subject to slashing throughout the lifetime of a sector if the storage does not pass a reliability check.
+
+## Total supply
+
+FIL has a maximum circulating supply of 2,000,000,000 FIL, meaning that at most 2B Filecoin will ever be created. In practice, this max supply would never be reached since a considerable amount of FIL are burnt as gas fees, penalties etc., and are permanently removed from circulating supply.
diff --git a/basics/what-is-filecoin/networks.md b/basics/what-is-filecoin/networks.md
new file mode 100644
index 000000000..d4c78a4c6
--- /dev/null
+++ b/basics/what-is-filecoin/networks.md
@@ -0,0 +1,31 @@
+---
+description: >-
+ The Filecoin network has several different networks for testing, staging, and
+ production purposes. This page contains information on available networks.
+---
+
+# Networks
+
+## Mainnet
+
+[Mainnet](../../networks/mainnet/) is the live production network that all nodes on the Filecoin network are connected to. It never resets.
+
+## Testnets
+
+Test networks, or testnets, are versions of the Filecoin network that attempt to simulate various aspects of the Filecoin mainnet. Since they are for testing, they should not be used for production applications or services.
+
+### Calibration
+
+[Calibration](../../networks/calibration/) testnet is the most realistic simulation of the mainnet, where prospective storage providers can experience more realistic sealing performance and hardware requirements due to the use of final proofs constructions and parameters, and prospective storage clients can store and retrieve real data on the network. Clients can participate in deal-making workflows and storage/retrieval functionality. It also has the same sector size as the mainnet.
+
+* [Public endpoint](https://api.calibration.node.glif.io/rpc/v0)
+* [Blockchain explorer](https://calibration.filscan.io/)
+* [Faucet](https://faucet.calibration.fildev.network/)
+
+### Spacenet
+
+[Spacenet](../../networks/spacenet/) is a Filecoin testnet that’s been modified to support Interplanetary Consensus (IPC). It aims to provide developers with a testbed to deploy their FVM use cases and innovate with new Web3 applications that leverage IPC subnets and the high-performance consensus provided by the Mir framework and the Trantor BFT consensus protocol.
+
+* [Details](../../networks/spacenet/)
+* [Public endpoint](../../networks/spacenet/rpcs.md)
+* [Faucet](../../networks/spacenet/get-test-tokens.md)
diff --git a/basics/what-is-filecoin/programming-on-filecoin.md b/basics/what-is-filecoin/programming-on-filecoin.md
new file mode 100644
index 000000000..fdf15b6a3
--- /dev/null
+++ b/basics/what-is-filecoin/programming-on-filecoin.md
@@ -0,0 +1,86 @@
+---
+description: >-
+ Once data has been stored, it is possible to run computations and calculations
+ on that data, without needing to retrieve the data from a storage provider.
+  This page covers the basics of how programming on Filecoin works.
+---
+
+# Programming on Filecoin
+
+## Compute-over-data
+
+When it comes to data, a common need beyond storage and retrieval is data transformation. The goal with the compute-over-data protocols is generally to perform computation over IPLD, which is the data layer used by content-addressed systems like Filecoin. There are working groups focused on different types of computing on Filecoin data, such as large-scale parallel compute (e.g., Bacalhau) and cryptographically verifiable compute (e.g. [Lurk](https://filecoin.io/blog/posts/introducing-lurk-a-programming-language-for-recursive-zk-snarks/)), etc.
+
+For example, [Bacalhau](https://www.bacalhau.org/) is a platform for public, transparent, and optionally verifiable distributed computation. It enables users to run arbitrary Docker containers and WebAssembly (wasm) images as tasks against data stored in the InterPlanetary File System (IPFS).
+
+It is worth noting that Filecoin is uniquely positioned to support large-scale off-chain computation since storage providers have compute resources such as GPUs and CPUs colocated with their data. By supporting compute-over-data on the Filecoin network, we enable a new paradigm of computing on the data where the data exists rather than moving the data to external compute nodes.
+
+## Filecoin virtual machine
+
+The Filecoin virtual machine (FVM) is a runtime environment for smart contracts on the Filecoin network. Smart contracts enable users to run any bounded computation, including those that create and enforce rules for storing and accessing data on the network. The FVM is responsible for executing these smart contracts and ensuring they are executed correctly and securely.
+
+FVM is designed to support native Filecoin actors written in languages that compile to WASM, as well as smart contracts written for foreign runtimes, including Solidity contracts for Ethereum Virtual Machine (EVM), Secure EcmaScript (SES), and eBPF. The [reference FVM](https://github.com/filecoin-project/ref-fvm) and SDK are written in Rust.
+
+According to the FVM roadmap, we initially support smart contracts written in Solidity and eventually support any language that compiles to WASM.
+
+The FVM enables compute-over-states on the Filecoin network and allows developers to build endless new use cases on top of Filecoin. Some example use cases are:
+
+### Data organizations
+
+FVM can create a new kind of organization – one built around datasets of various kinds.
+
+#### Data DAO and tokenized datasets
+
+The FVM enables the creation and management of data-based decentralized and autonomous organizations – data DAOs. The FVM allows a group of individuals, or organizations, to curate and preserve data collections. Data DAOs can govern and monetize data access and pool the returns into a shared treasury to fund the collection's preservation and long-term growth. One could even exchange those data tokens between peers and request computation services on that data, such as validation, joins, analysis, feature detection, and extraction, moving into machine learning.
+
+#### Perpetual storage
+
+FVM allows users to store once and have repair and replication bots manage the repetitive storage deal creation tasks so that data can be stored perpetually. Using a smart contract, users can provision a wallet with FIL, and storage providers can use that to fund data storage permanently. Repair bots can monitor the storage deals and replicate the data with other storage providers when necessary. This process gives users long-term storage permanence.
+
+### Financial services for miners
+
+FVM can provide a variety of financial services for storage providers. The needs of these SPs are unique to the Filecoin ecosystem.
+
+#### Lending and staking protocols
+
+Users can lend out Filecoin to storage providers to use it as storage collateral and receive interest in return. These loans can be undercollateralized based on the on-chain storage history of past storage provider performance. Community members can use this history to generate reputation scores, enabling everyone to identify good borrowers. On top of that, loans can be automatically paid back to investors by using a multisig as the storage provider’s owner address, including lenders and a third party, to help negotiate payback. New FVM-enabled smart contracts give every FIL token holder access to new yield opportunities on their holdings while also benefiting the whole Filecoin economy by allowing entry ramps for providing storage on the network.
+
+#### Insurance
+
+SPs need to have financial products that help protect them from the risk they are undertaking in creating more storage solutions. Certain characteristics such as payment history, length of operation, and availability can be used to craft insurance policies just as they can be used to underwrite loans to SPs. This can protect them from the financial consequences of active faulting or token price drops.
+
+### Core chain infrastructure
+
+We expect that FVM will gain feature parity with other chains that persist. This is required for any EVM chain to operate but is not necessarily tied to storage primitives.
+
+#### Decentralized exchanges
+
+Users on FVM need to be able to exchange FIL for other tokens issued on-chain. This may be a decentralized exchange such as a fork of Uniswap or Sushi or involve building a decentralized order book similar to Serum on Solana.
+
+#### Token bridges
+
+While not immediately on the roadmap, bridges are needed from EVM chains, Move chains, and Cosmos chains to bring wrapped tokens from other ecosystems into the fold. With the current launch, we are more focused internally since the value proposition of Filecoin is unique enough that it does not need to bootstrap TVL from other chains. However, in the long run, we expect FVM to be part of a broader family of blockchains.
+
+Besides these, there are a lot more use cases that the FVM could enable, such as data access control ([Medusa](https://cryptonet.org/projects/project-medusa-scalable-threshold-network-on-chain)), retrieval and trustless reputation systems, replication workers, storage bounties, and L2 networks. To learn more about what you can build on top of FVM, check out our [Request for Startup](https://protocollabs.notion.site/Request-for-Startups-FVM-edition-8cd3e76982d14e29b33335ca458fb087) post.
+
+If you are interested in building these use cases, there is a list of solution blueprints that might help as a reference point regarding how some of these could work on a high level:
+
+* [DataDAO Solution Blueprint](https://docs.google.com/document/d/1OYDh_gs7mAk2M_O9m-2KedQA7MNo6ysIzH6eaQZxMOk/edit?pli=1)
+* [Perpetual Storage Solution Blueprint](https://docs.google.com/document/d/19Kck1PiGGrUKyd6XBYj6NtsC5NiCjndUSsv0OFA1Lv0/edit)
+* [Lending pool cookbook](https://docs.google.com/document/d/18in74On0bY7KyEsPgItvNvfUUPcPtHjNQtVfLdJUyzM/edit)
+
+### Filecoin EVM
+
+The Filecoin EVM runtime (FEVM) is the Ethereum Virtual Machine (EVM) virtualized as a runtime on top of the Filecoin Virtual Machine (FVM). It will allow developers to port any existing EVM-based smart contracts straight onto the FVM and make them work out of the box. FEVM emulates EVM bytecode at the low level, supporting contracts written in Solidity, Vyper, and Yul. The EVM foreign runtime is based on preexisting OSS libraries, including [SputnikVM](https://github.com/rust-blockchain/evm) and Revm. You can find out more details in the [EVM <> FVM mapping specification](https://github.com/filecoin-project/fvm-project/blob/main/04-evm-mapping.md).
+
+Because Filecoin nodes offer the Ethereum JSON-RPC API support, FEVM is also completely compatible with any EVM development tools, such as Hardhat, Brownie, and MetaMask. Most smart contracts ported to Filecoin shouldn’t require changes or auditing. For example, new ERC-20 tokens can be launched on the Filecoin network or bridged directly to token pools on other chains.
+
+Developers can deploy actors on either the FEVM or native FVM; which one should you choose? The decision can be summed up as such: if you want better performance, write actors that are compiled to WASM and deployed to native FVM. If you are familiar with Solidity and want access to the EVM ecosystem of tools, but don’t mind slightly less performance, deploy to the FEVM.
+
+To sum it up, the FEVM allows current Web3 developers to quickly start writing actors on the Filecoin blockchain while using all of the tools, software packages, and languages they are used to while having access to Filecoin storage deals as a native.
+
+The difference between FEVM and EVM contracts is that contracts deployed on FEVM can interact with built-in actors to interact with Filecoin-specific actors, such as miner actors, as mentioned in the built-in actor section. This allows developers to build Filecoin-native decentralized applications for the new use cases mentioned above. Smart contracts deployed to the Ethereum blockchain have no direct access to the Filecoin network or Filecoin-specific actors.
+
+To allow Solidity smart contracts on FEVM to seamlessly call methods on Filecoin built-in actors and access Filecoin-specific syscalls idiomatically, a Filecoin-Solidity API library has been developed; you can use it for building your use cases, such as interacting with storage deals.
+
+If you build on FEVM, you might find some of the [example contracts here](https://github.com/lotus-web3/client-contract) helpful.
diff --git a/basics/what-is-filecoin/retrieval-market.md b/basics/what-is-filecoin/retrieval-market.md
new file mode 100644
index 000000000..6f53f7a7f
--- /dev/null
+++ b/basics/what-is-filecoin/retrieval-market.md
@@ -0,0 +1,22 @@
+---
+description: >-
+  The retrieval market refers to negotiating retrieval deals for a provider to
+  serve stored data to a client. In this agreement, the client agrees to pay the
+  retrieval provider a certain amount of FIL for serving the data.
+---
+
+# Retrieval market
+
+## Basic retrieval
+
+Currently, Filecoin nodes support direct retrieval from the storage miners who originally stored the data. Clients can directly send retrieval requests to a storage provider to retrieve their data by paying some FIL for retrieval.
+
+Clients need to provide enough information to the storage provider for the data retrieval request, including:
+
+* Storage provider ID: The ID of the storage provider where the data is stored.
+* Payload CID: also called Data CID.
+* Address: The address initially used to create the storage deal.
+
+## Saturn
+
+[Saturn](https://saturn.tech/) is a Web3 CDN in Filecoin’s retrieval market which serves the data stored on Filecoin with low latency and at low cost. It consists of independent retrieval providers specifically dedicated to that business, making retrieval an efficient, fast, and reliable operation.
diff --git a/basics/what-is-filecoin/storage-market.md b/basics/what-is-filecoin/storage-market.md
new file mode 100644
index 000000000..37acb87b1
--- /dev/null
+++ b/basics/what-is-filecoin/storage-market.md
@@ -0,0 +1,40 @@
+---
+description: >-
+ The storage market is the data entry point into the network where storage
+ providers and clients negotiate and publish storage deals on-chain.
+---
+
+# Storage market
+
+## Deal making
+
+The lifecycle for a deal within the storage market contains four distinct phases:
+
+* Discovery: the client identifies potential SPs and asks for their prices.
+* Negotiation: once the client has selected an SP both parties agree to the term of the deal.
+* Publishing: the deal is published on-chain.
+* Handoff: the deal is added into a sector where data storage can be proven by the SP.
+
+## Filecoin plus
+
+The mission of Filecoin Plus (Fil+) is to maximize the amount of useful storage on the Filecoin network. The aim is to bring more meaningful and valuable data into the Filecoin network by offering verified clients cheap, or even free, storage. This mechanism is designed and operates around datacap, the storage quota allocated to verified clients to store data and boost incentives for storage providers.
+
+Verified clients can onboard data into Filecoin using datacap, which they apply from community-selected notaries. In return for storing verified storage deals, storage providers receive datacap with a 10x boost to their storage power which eventually increases their block rewards as an incentive.
+
+* Datacap: a Datacap token will be allocated to a verified client to spend in the storage deals carrying a 10x deal quality multiplier.
+* Notaries: community-selected notaries govern the program by verifying storage clients and allocating datacap tokens to verified clients.
+* Verified clients: clients are active network participants with datacap allocation for their data storage.
+
+## Storage on-ramps
+
+To simplify the process of storing data on the network, there are many storage helpers to provide an easier way to integrate Filecoin storage as well as IPFS into your applications or smart contracts.
+
+Storage helpers provide libraries that abstract Filecoin deal-making into simple, streamlined API calls and storing the data on IPFS to provide more efficient and fast retrieval for your content.
+
+Here are some available storage helpers:
+
+* [boost.filecoin.io](https://boost.filecoin.io/)
+* [estuary.tech](https://estuary.tech/)
+* [web3.storage](https://web3.storage/)
+* [nft.storage](https://nft.storage/)
+* [lighthouse.storage](https://www.lighthouse.storage/)
diff --git a/basics/what-is-filecoin/storage-model.md b/basics/what-is-filecoin/storage-model.md
new file mode 100644
index 000000000..fe9980018
--- /dev/null
+++ b/basics/what-is-filecoin/storage-model.md
@@ -0,0 +1,38 @@
+---
+description: >-
+ A storage model defines how data is stored within a system. This page covers
+ the basic aspects of Filecoin’s storage model.
+---
+
+# Storage model
+
+The Filecoin storage model consists of three components:
+
+* Providers
+* Deals
+* Sectors
+
+## Providers
+
+Providers, as the name suggests, provide a service to users of the network. There are two types of provider:
+
+* Storage providers
+* Retrieval providers
+
+### Storage providers
+
+Storage providers, often called SPs, are responsible for storing files and data for clients on the network and providing cryptographic proofs to verify storage. The vast majority of providers on the Filecoin network are SPs.
+
+### Retrieval providers
+
+Retrieval providers, often called RPs, are responsible for providing users quick access to their data. They focus on rapid access to data, rather than long-term storage. Most of the time storage providers also provide retrieval access to their users as part of the same system. However, more and more stand-alone RPs are joining the network.
+
+## Deals
+
+In the Filecoin network, SPs and RPs provide their storage or retrieval service to data clients through deals. These deals are negotiated and agreed upon between two parties, and include terms like data size, price, deal duration, and collateral.
+
+The deal-making process happens _off-chain_. Once both parties agree to the terms of a deal, that deal is published _on-chain_ for the rest of the network to see and validate.
+
+## Sectors
+
+Sectors are the basic units of provable storage where storage providers store clients’ data and generate PoSt on behalf of the Filecoin network. Sectors have standard sizes and a lifetime that storage providers can extend before reaching the end of the lifetime. `32 GiB` and `64 GiB` sector sizes are supported.
diff --git a/networks/calibration/README.md b/networks/calibration/README.md
new file mode 100644
index 000000000..9c2004918
--- /dev/null
+++ b/networks/calibration/README.md
@@ -0,0 +1,60 @@
+---
+description: >-
+ The calibration network is the most realistic testnet simulation of the
+ Filecoin mainnet.
+---
+
+# Calibration
+
+The Calibration testnet is the most realistic simulation of the Filecoin mainnet.
+
+Prospective storage providers can experience more realistic sealing performance and hardware requirements using final proofs constructions and parameters. Storage clients can store and retrieve _real data_ on the network. Clients can also participate in deal-making workflows and storage and retrieval functionality. The sector size on the Calibration testnet is the same as on the Filecoin mainnet; 32 GiB and 64 GiB sectors are supported. This testnet also includes the Filecoin EVM-runtime features found on the Filecoin mainnet.
+
+**Large sector sizes**
+
+A drawback of 32 GiB and 64 GiB sectors is that storage providers generally prefer deals that use a significant amount of the sector. This could cause issues for builders wanting to store less than 4 GiB of data.
+
+[PiKNiK](https://www.piknik.com/) is running a Boost-enabled storage provider that will listen out for small deals to solve this issue. The PiKNiK SP will attempt to aggregate these deals into 32 GiB sectors. If a deal has yet to be bundled into a 32 GiB sector after 12 hours, then the PiKNiK SP will publish it in whatever state it is in.
+
+Developers can reference pre-existing deals that are already available on the network. See the [`#fil-net-calibration-discuss` channel in the Filecoin Slack](https://filecoinproject.slack.com/archives/C01D42NNLMS) for support.
+
+**Maintainer**: [Protocol Labs](https://protocol.ai)
+
+## Genesis
+
+* CAR File: `QmY581cXXtNwHweiC69jECupu9EBx274huHjSgxPNv1zAAj`
+* Reset Timestamp: `2021-02-19T23:10:00Z`
+* Genesis Block CID: `bafy2bzaceapb7hfdkewspic7udnogw4xnhjvhm74xy5snwa24forre5z4s2lm`
+* SHA-1 Digest: `944c0c13172b9f552dfed5dfaffaba95113c8254`
+
+## Network parameters
+
+* Supported Sector Sizes: `32 GiB` and `64 GiB`
+* Consensus Miner Min Power: `32 GiB`
+* Epoch Duration Seconds: `30`
+* Expected Leaders per Epoch: `5`
+* WindowPoSt Proving Period: `2880`
+* WindowPoSt Challenge Window: `60`
+* WindowPoSt Period Deadlines: `48`
+* Pre-Commit Challenge Delay: `150`
+
+## Bootstrap peers
+
+```plaintext
+/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWRLZAseMo9h7fRD6ojn6YYDXHsBSavX5YmjBZ9ngtAEec
+/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWJFtDXgZEQMEkjJPSrbfdvh2xfjVKrXeNFG1t8ioJXAzv
+/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWP1uB9Lo7yCA3S17TD4Y5wStP5Nk7Vqh53m8GsFjkyujD
+/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWLrPM4WPK1YRGPCUwndWcDX8GCYgms3DiuofUmxwvhMCn
+```
+
+## Snapshots
+
+* [Latest minimal snapshot](https://snapshots.calibrationnet.filops.net/minimal/latest.zst)
+
+## Resources
+
+* [Faucet](https://faucet.calibration.fildev.network/)
+* [Stats Dashboard](https://stats.calibration.fildev.network/)
+* [Slack Channel for Updates: #fil-network-announcements](https://filecoinproject.slack.com/archives/C01AC6999KQ)
+* [Slack Channel for Questions: #fil-help](https://filecoinproject.slack.com/archives/CEGN061C5)
+* [Latest lightweight snapshot](https://forest.chainsafe.io/calibnet/snapshot-latest) generated with [Forest](http://github.com/ChainSafe/forest) by [ChainSafe](https://chainsafe.io/)
diff --git a/networks/calibration/explorers.md b/networks/calibration/explorers.md
new file mode 100644
index 000000000..5a70d2261
--- /dev/null
+++ b/networks/calibration/explorers.md
@@ -0,0 +1,31 @@
+---
+description: >-
+ The following block explorers are available for the Calibration testnet,
+ listed in alphabetical order.
+---
+
+# Explorers
+
+## Beryx
+
+[beryx.zondax.ch](https://beryx.zondax.ch)
+
+## Filfox
+
+[calibration.filfox.info](https://calibration.filfox.info)
+
+## Filscout
+
+[calibration.filscout.com](https://calibration.filscout.com/en)
+
+## Filscan
+
+[calibration.filscan.io](https://calibration.filscan.io/)
+
+## Glif Explorer [#](https://docs.filecoin.io/networks/calibration/explorers/#glif-explorer)
+
+[explorer.glif.io](https://explorer.glif.io/?network=calibration)
+
+## Starboard
+
+[fvm.starboard.ventures](https://fvm.starboard.ventures/)
diff --git a/networks/calibration/rpcs.md b/networks/calibration/rpcs.md
new file mode 100644
index 000000000..b4882f7d8
--- /dev/null
+++ b/networks/calibration/rpcs.md
@@ -0,0 +1,43 @@
+---
+description: Public RPC endpoints are available for the Calibration testnet.
+---
+
+# RPCs
+
+These endpoints are limited to [read-only Filecoin JSON RPC API calls](../../reference/json-rpc/) and [`MPoolPush`](../../reference/json-rpc/mpool.md) for sending messages that have already been signed.
+
+{% hint style="info" %}
+Chainlist contains a dynamically updated list of available RPCs along with stats like current block-height and latency. [Find out more at chainlist.network](https://chainlist.network)
+{% endhint %}
+
+## [Ankr](https://www.ankr.com/rpc/filecoin)
+
+* HTTPS: `https://rpc.ankr.com/filecoin_testnet`
+* [Ankr documentation](https://www.ankr.com/docs/rpc-service/chains/chains-list/#filecoin)
+
+## [ChainupCloud](https://cloud.chainup.com)
+
+* HTTPS: `https://filecoin-calibration.chainup.net/rpc/v1`
+* WebSocket: `wss://filecoin-calibration.chainup.net/rpc/v1`
+* [ChainupCloud documentation](https://docs.chainupcloud.com/blockchain-api/filecoin/public-apis)
+
+## [ChainStack](https://chainstacklabs.com)
+
+* HTTPS: `https://filecoin-calibration.chainstacklabs.com/rpc/v1`
+* WebSocket: `wss://ws-filecoin-calibration.chainstacklabs.com/rpc/v1`
+* [Chainstack documentation](https://chainstack.com/labs/#filecoin)
+
+## [Glif](https://glif.io)
+
+Please note that publicly available hosted endpoints **only guarantee 2000 of the latest blocks.**
+
+* HTTPS: `https://api.calibration.node.glif.io/rpc/v1`
+* WebSocket: `wss://wss.calibration.node.glif.io/apigw/lotus/rpc/v1`
+* Lotus lite-node command:
+
+```
+FULLNODE_API_INFO=wss://wss.calibration.node.glif.io/apigw/lotus lotus daemon --lite
+```
+
+* When using a lite-node, omit `/rpc/v1` from Glif’s WebSocket address.
+* [Glif documentation](https://hosting.glif.io/)
diff --git a/networks/local-testnet/README.md b/networks/local-testnet/README.md
new file mode 100644
index 000000000..73c8558e6
--- /dev/null
+++ b/networks/local-testnet/README.md
@@ -0,0 +1,674 @@
+---
+description: >-
+ Local networks are a useful way to get started with Filecoin development. This
+ guide covers how to start a local network using Lotus as the Filecoin node
+ implementation.
+---
+
+# Local testnet
+
+## Setup
+
+A Filecoin network has two node types: storage provider nodes and client nodes. In our local developer network (devnet), we’re going to create a single storage provider node to handle our requests, and we’ll also create a client node to pass information into our network. Both of these nodes run in the terminal. In total, we’ll have three terminal windows open at once.
+
+## Prerequisites
+
+The nodes we’re going to run have relatively lightweight hardware requirements. However, since we’re running multiple instances at once it’s recommended that your computer meets the following requirements:
+
+1. At least 8 GiB of RAM
+2. A quad-core CPU.
+3. (Optional) Because parts of this tutorial require multiple terminal windows, install a terminal multiplexer like [Tmux](https://github.com/tmux/tmux).
+
+## Steps
+
+To build the nodes, you’ll need some specific software. Run the following command to install the software prerequisites:
+
+{% tabs %}
+{% tab title="MacOS" %}
+1. Open a terminal window.
+2. Check that you have [Homebrew](https://brew.sh/) installed.\
+
+
+ ```shell
+ brew --version
+
+ # Homebrew 3.6.18
+ # ...
+ ```
+
+ \
   If you do not see a version number, or receive an error message, install [Homebrew](https://brew.sh/).
+3. Ensure you have [XCode](https://developer.apple.com/xcode/) installed.\
+
+
+ ```shell
+ xcode-select -p
+
+ # /Library/Developer/CommandLineTools
+ ```
+
+ \
   If you do not see the output above, or receive an error message, install [XCode](https://developer.apple.com/xcode/).
+4. Install the following dependencies:\
+
+
+ ```shell
+ brew install go bzr jq pkg-config hwloc coreutils
+ ```
+
+
+5. Install Rust:\
+
+
+ ```shell
+ curl https://sh.rustup.rs -sSf | sh -s -- -y
+
+
+ # ...
+ # Rust is installed now. Great!
+ # ...
+ ```
+
+
+6. Source the `~/.cargo/env` config file:\
+
+
+ ```shell
+ source "$HOME/.cargo/env"
+ ```
+{% endtab %}
+
+{% tab title="Ubuntu" %}
+1. Install the following dependencies:\
+
+
+ ```shell
+ sudo apt update -y
+ sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y
+ ```
+
+
+2. Install Go and add `/usr/local/go/bin` to your `$PATH` variable:\
+
+
+ ```shell
+ wget -c https://golang.org/dl/go1.18.8.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+ ```
+
+
+3. You may need to export `/usr/local/go/bin` to your `$PATH`. This process changes depending on which shell you’re using:
+
+| Shell | Export to $PATH example |
+| ----- | ----------------------------------------------------------------------------- |
+| Bash | `echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc && source ~/.bashrc` |
+| ZSH | `echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.zshrc && source ~/.zshrc` |
+
+4. Install Rust and source the `~/.cargo/env` config file:
+
+```shell
+curl https://sh.rustup.rs -sSf | sh -s -- -y
+source "$HOME/.cargo/env"
+```
+
+5. Done! You can move on to the [Pre-build](https://docs.filecoin.io/networks/local-testnet/set-up/#pre-build) section.
+{% endtab %}
+{% endtabs %}
+
+### Pre-build
+
+Before we can build the Lotus binaries, there’s some setup we need to do. We’ll create the executable binaries within a new `~/lotus-devnet`.
+
+{% tabs %}
+{% tab title="MacOS Intel" %}
+1. Clone the repository:\
+
+
+ ```shell
+ git clone https://github.com/filecoin-project/lotus.git ~/lotus-devnet
    cd ~/lotus-devnet
+ ```
+
+
+2. Checkout to the latest stable branch:\
+
+
+ ```shell
+ git checkout releases
+ ```
+
+
+3. Done! You can move on to the [Build](https://docs.filecoin.io/networks/local-testnet/set-up/#build) section.
+{% endtab %}
+
+{% tab title="MacOS ARM" %}
+1. Clone the repository into a new `~/lotus-devnet` directory:\
+
+
+ ```shell
+ git clone https://github.com/filecoin-project/lotus.git ~/lotus-devnet
+ cd ~/lotus-devnet
+ ```
+
+
+2. Checkout to the latest stable branch:\
+
+
+ ```shell
+ git checkout releases
+ ```
+
+
+3. Create the necessary environment variables to allow Lotus to run on M1 architecture:\
+
+
+ ```shell
+ export LIBRARY_PATH=/opt/homebrew/lib
+ export FFI_BUILD_FROM_SOURCE=1
+ export PATH="$(brew --prefix coreutils)/libexec/gnubin:/usr/local/bin:$PATH"
+ ```
+
+
+4. Done! You can move on to the [Build](https://docs.filecoin.io/networks/local-testnet/set-up/#build) section.
+{% endtab %}
+
+{% tab title="Ubuntu" %}
+1. Clone the repository into a new `~/lotus-devnet` directory:\
+
+
+ ```shell
+ git clone https://github.com/filecoin-project/lotus.git ~/lotus-devnet
+ cd ~/lotus-devnet
+ ```
+
+
+2. Checkout to the latest stable branch:\
+
+
+ ```shell
+ git checkout releases
+ ```
+
+
+3. If your processor was released later than an AMD Zen or Intel Ice Lake CPU, enable the use of SHA extensions by adding these two environment variables:\
+
+
+ ```shell
+ export RUSTFLAGS="-C target-cpu=native -g"
+ export FFI_BUILD_FROM_SOURCE=1
+ ```
+
+ \
+ If in doubt, ignore this command and move on to [the next section](https://docs.filecoin.io/networks/local-testnet/set-up/#build).
+4. Done! You can move on to the [Build](https://docs.filecoin.io/networks/local-testnet/set-up/#build) section.
+{% endtab %}
+{% endtabs %}
+
+### Build
+
+1. Create the `2k` binary for Lotus:\
+
+
+ ```shell
+ make 2k
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ git submodule update --init --recursive
+ Submodule 'extern/filecoin-ffi' (https://github.com/filecoin-project/filecoin-ffi.git) registered for path 'extern/filecoin-ffi'
+ Submodule 'extern/serialization-vectors' (https://github.com/filecoin-project/serialization-vectors.git) registered for path 'extern/serialization-vectors'
+
+ ...
+ ```
+
+ \
+ This process will take about 5 minutes to complete.
+2. Fetch the proving parameters for a 2048-byte sector size:\
+
+
+ ```shell
+ ./lotus fetch-params 2048
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T10:44:43.058-0400 INFO paramfetch go-paramfetch@v0.0.4/paramfetch.go:244 Fetching /var/tmp/filecoin-proof-parameters/v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk from https://proofs.filecoin.io/ipfs
+ 2023-01-31T10:44:43.058-0400 INFO paramfetch go-paramfetch@v0.0.4/paramfetch.go:262 GET https://proofs.filecoin.io/ipfs/QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF 13.32 KiB / 13.32 KiB [===========================================================================================================================================] 100.00% 155.63 KiB/s 0
+
+ ...
+ ```
+
+ \
+ This process downloads a few files totalling to around 2 GiB in size. Depending on your internet speed, this process can take a few minutes to complete.
+3. Pre-seal two sectors for the genesis block:\
+
+
+ ```shell
+ ./lotus-seed pre-seal --sector-size 2KiB --num-sectors 2
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ sector-id: {{1000 1} 5}, piece info: {2048 baga6ea4seaqf7ovs6euxa4ktencg2gza7lua32l2ugqu76uqgvnjocek6gtoufi}
+ 2023-01-31T10:49:46.562-0400 WARN preseal seed/seed.go:175 PreCommitOutput: {{1000 1} 5} bagboea4b5abcamxkzmzcciabqqk3xuuvj3k23nfuojboopyw3kg2mblhj6mzipii baga6ea4seaqf7ovs6euxa4ktencg2gza7lua32l2ugqu76uqgvnjocek6gtoufi
+ 2023-01-31T10:49:46.562-0400 WARN preseal seed/seed.go:100 PeerID not specified, generating dummy
+
+ ...
+ ```
+4. Create the genesis block:\
+
+
+ ```shell
+ ./lotus-seed genesis new localnet.json
+ ```
+
+
+5. Create a pre-miner and an address with some funds:\
+
+
+ ```shell
+ ./lotus-seed genesis add-miner localnet.json ~/.genesis-sectors/pre-seal-t01000.json
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T10:52:03.855-0400 INFO lotus-seed lotus-seed/genesis.go:129 Adding miner t01000 to genesis template
+ 2023-01-31T10:52:03.855-0400 INFO lotus-seed lotus-seed/genesis.go:146 Giving t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq some initial balance
+ ```
+
+Our Lotus installation is now ready to start running the nodes!
+
+### Start the nodes
+
+As mentioned earlier, we will be running two types of node: a storage provider node and a client node. In the Lotus project, a storage provider node is referred to as a _miner_. Since we’re going to run multiple nodes, you’ll need to have at least three terminal windows open. If your terminal emulator supports tabs, consider using them to help organize your setup.
+
+#### Client
+
+1. Open a new terminal window.
+2. Move into the `~/lotus-devnet` directory:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ ```
+
+
+3. Export the devnet-specific variables again to make sure we don’t interfere with any existing Lotus installations on your system:\
+
+
+ ```shell
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+
+ \
+ Because environmental variables are reset when you open a new terminal window, these variables must be exported every time we start a new terminal.
+4. Start the client node using `lotus daemon`:\
+
+
+ ```shell
+ ./lotus daemon --lotus-make-genesis=devgen.car --genesis-template=localnet.json --bootstrap=false
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T10:57:41.022-0400 INFO main lotus/daemon.go:218 lotus repo: /home/johnny/.lotus
+ 2023-01-31T10:57:41.022-0400 INFO repo repo/fsrepo.go:265 Initializing repo at '/home/johnny/.lotus'
+ 2023-01-31T10:57:41.022-0400 INFO paramfetch go-paramfetch@v0.0.4/paramfetch.go:209 Parameter file /var/tmp/filecoin-proof-parameters/v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk is ok
+ ```
+
+ \
+ This command will continue to run. Leave this window open.
+
+#### Storage provider
+
+1. Open a new terminal window.
+2. Move into the `~/lotus-devnet` directory:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ ```
+
+
+3. Export the devnet-specific variables again to make sure we don’t interfere with any existing Lotus installations on your system:\
+
+
+ ```shell
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+4. Import the genesis miner key:\
+
+
+ ```shell
+ ./lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ imported key t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq successfully!
+ ```
+5. Initialize the genesis miner:\
+
+
+ ```shell
+ ./lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T11:04:46.148-0400 INFO main lotus-miner/init.go:130 Initializing lotus miner
+ 2023-01-31T11:04:46.148-0400 INFO main lotus-miner/init.go:157 Checking proof parameters
+
+ ...
+
+ 2023-01-31T11:04:46.148-0400 INFO main lotus-miner/init.go:283 Miner successfully created, you can now start it with 'lotus-miner run'
+ ```
+
+ \
    This process takes a few minutes to complete.
+6. Start the storage provider node with `lotus-miner run`:\
+
+
+ ```shell
+ ./lotus-miner run --nosync
+ ```
+
+ \
+ This terminal window will continue to run. You must run all further commands from a new terminal window.
+
+We now have a client node and a storage provider node successfully talking to each other! Next up, we can send requests to our client node to ensure everything is set up correctly.
+
+### Get some FIL
+
+Now that we’ve got our local devnet running let’s create a new wallet and send some funds from our miner account to that new wallet.
+
+#### Create a wallet
+
+There are multiple ways to create a new wallet. The simplest way is to use the Lotus CLI directly:
+
+1. Open a new terminal window.
+2. Move into the `~/lotus-devnet` directory:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ ```
+
+
+3. Export the devnet-specific variables again to make sure we don’t interfere with any existing Lotus installations on your system:\
+
+
+ ```shell
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+4. Create a new wallet with `lotus wallet new`:\
+
+
+ ```shell
+ ./lotus wallet new
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq
+ ```
+
+
+5. View the wallets available on this node with `lotus wallet list`:\
+
+
+ ```shell
+ ./lotus wallet list
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ Address Balance Nonce Default
+ t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 0 FIL 0
+ t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq 49999999.999763880085417692 FIL 2 X
+ ```
+
+
+6. You can now close this terminal window, or you can keep it open for the next section.
+
+#### Send funds
+
+We can now send FIL from the pre-mined `t3q4o7g...` account to our new `t1snly7...` account with `lotus send`:
+
+1. If you closed the terminal windows from the last section, open a new terminal window, move into the `~/lotus-devnet` directory, and export the devnet-specific variables again with:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+
+
+2. View the wallets available on this node with `lotus wallet list`:\
+
+
+ ```shell
+ ./lotus wallet list
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ Address Balance Nonce Default
+ t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 0 FIL 0
+ t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq 49999999.999763880085417692 FIL 2 X
+ ```
+
+ \
+ In the above example, the `t3q4o...` address is the _pre-mined_ address we created in an earlier step. This has a very large balance of FIL. We want to send FIL from this pre-mined address to our new `t1snl...` address.
+3. Create the send request with `lotus send`, supplying the pre-mined `t3q4o...` address as the `--from` address, the new `t1snl...` address as the receiving address, and the amount of FIL we want to send:\
+
+
+ ```shell
+ ./lotus send --from
+ ```
+
+ \
+ For example:\
+
+
+ ```shell
+ ./lotus send --from t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 2000
+
+ # bafy2bzaceaqzbgiazwvtpago6wpkxl42puxfkvwv5cwjpime2irqatamji2bq
+ ```
+
+
+4. Check the balance of your new `t1snl...` address with `lotus wallet balance`:\
+
+
+ ```shell
+ ./lotus wallet balance
+ ```
+
+ \
+ For example:\
+
+
+ ```shell
+ ./lotus wallet balance t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq
+
+ # 2000 FIL
+ ```
+
+
+5. You can now close this terminal window, or you can keep it open for the next section.
+
+### Stop and restart
+
+You’ll eventually want to stop your local devnet from running or may need to restart it. Follow these steps.
+
+#### Stop the devnet
+
+1. Open the storage provider terminal window.
+2. Press `CTRL` + `c` to stop the node. The node will print `Graceful shutdown successful` once it has fully stopped:\
+
+
+ ```plaintext
+ # CTRL + c
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```shell
+ ...
+
+ 2023-02-14T10:54:42.030-0400 DEBUG advmgr sealer/sched_worker.go:603 worker 1fa5f6b1-eb4d-4d92-98b1-6114a0d7695d dropped
+ 2023-02-14T10:54:42.056-0400 INFO builder node/shutdown.go:44 miner shut down successfully
+ 2023-02-14T10:54:42.056-0400 WARN builder node/shutdown.go:47 Graceful shutdown successful
+ ```
+
+
+3. You can now close the storage provider terminal window.
+4. Open the client terminal window.
+5. Press `CTRL` + `c` to stop the node. The node will print `Graceful shutdown successful` once it has fully stopped:\
+
+
+ ```plaintext
+ ...
+
+ 2023-02-14T10:55:42.475-0400 INFO badgerbs v2@v2.2007.3/db.go:554 Force compaction on level 0 done
+ 2023-02-14T10:55:42.502-0400 INFO builder node/shutdown.go:44 node shut down successfully
+ 2023-02-14T10:55:42.502-0400 WARN builder node/shutdown.go:47 Graceful shutdown successful
+ ```
+
+
+6. You can now close the client terminal window.
+
+#### Restart the devnet
+
+1. Open a new terminal window, move into the `~/lotus-devnet` directory, and export the devnet-specific variables again with:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+
+
+2. Start the client node with `lotus daemon`:\
+
+
+ ```shell
+ ./lotus daemon --lotus-make-genesis=devgen.car --genesis-template=localnet.json --bootstrap=false
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T10:57:41.022-0400 INFO main lotus/daemon.go:218 lotus repo: /home/johnny/.lotus
+ 2023-01-31T10:57:41.022-0400 INFO repo repo/fsrepo.go:265 Initializing repo at '/home/johnny/.lotus'
+ 2023-01-31T10:57:41.022-0400 INFO paramfetch go-paramfetch@v0.0.4/paramfetch.go:209 Parameter file /var/tmp/filecoin-proof-parameters/v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk is ok
+ ```
+
+ \
+ This command will continue to run. Leave this window open.
+3. For the storage provider node, open a new terminal window, move into the `~/lotus-devnet` directory, and export the devnet-specific variables again with:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ export LOTUS_PATH=~/.lotus-local-net
+ export LOTUS_MINER_PATH=~/.lotus-miner-local-net
+ export LOTUS_SKIP_GENESIS_CHECK=_yes_
+ export CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
+ export CGO_CFLAGS="-D__BLST_PORTABLE__"
+ ```
+
+
+4. Restart the storage provider node with `lotus-miner run`:\
+
+
+ ```shell
+ ./lotus-miner run --nosync
+ ```
+
+ \
+ This will output something like:\
+
+
+ ```plaintext
+ 2023-01-31T12:54:12.009-0400 INFO main lotus-miner/run.go:98 Checking full node sync status
+ 2023-01-31T12:54:12.013-0400 INFO modules modules/core.go:64 memory limits initialized {"max_mem_heap": 0, "total_system_mem": 16444395520, "effective_mem_limit": 16444395520}
+ 2023-01-31T12:54:12.013-0400 WARN modules modules/core.go:124 failed to initialize cgroup-driven watchdog; err: failed to load cgroup for process: cgroups: cgroup mountpoint does not exist
+ ```
+
+
+5. This command will continue to run. Leave this window open.
+6. You must run all further commands from a new terminal window.
+
+### Next steps
+
+To summarize, you’ve started a local devnet, funded a new address, and exported that address to a file! You’ve got all the pieces ready to start developing applications on Filecoin!
+
+### Troubleshooting
+
+Running into issues? Check out these troubleshooting steps to figure out what’s going on.
+
+#### Could not get API info for FullNode
+
+You may encounter the following error message:
+
+```plaintext
+ERROR: could not get API info for FullNode: could not get api endpoint: API not running (no endpoint
+```
+
+If you receive this error when trying to call your Lotus daemon, either your `lotus daemon` isn’t running (see [Restart the devnet](./#stop-and-restart)) or you haven’t re-exported the necessary variables (see the [Build section](./#build)).
diff --git a/networks/local-testnet/get-test-tokens.md b/networks/local-testnet/get-test-tokens.md
new file mode 100644
index 000000000..e16a91759
--- /dev/null
+++ b/networks/local-testnet/get-test-tokens.md
@@ -0,0 +1,85 @@
+---
+description: >-
+  Test funds are available to developers so that they can test their smart
+ contracts and applications within the confines of a test network. This page
+ covers how to get test funds from a local testnet.
+---
+
+# Get test tokens
+
+Before we begin, you must have a local testnet running. Follow the [Run a local network guide](./) if you haven’t got a local testnet set up yet.
+
+1. Change directory to where you created the `lotus` and `lotus-miner` binaries. If you followed the [Run a local network guide](./) these binaries will be in `~/lotus-devnet`:\
+
+
+ ```shell
+ cd ~/lotus-devnet
+ ```
+
+
+2. View the wallets available on this node with `lotus wallet list`:\
+
+
+ ```shell
+ ./lotus wallet list
+ ```
+
+ \
+ This command will output something like:\
+
+
+ ```plaintext
+ Address Balance Nonce Default
+ t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 0 FIL 0
+ t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq 49999999.999763880085417692 FIL 2 X
+ ```
+
+
+3. Create the send request with `lotus send`, supplying the pre-mined `t3q4o...` address as the `--from` address, the new `t1snl...` address as the receiving address, and the amount of FIL we want to send:\
+
+
+ ```shell
+ ./lotus send --from
+ ```
+
+ \
+ For example:\
+
+
+ ```shell
+ ./lotus send --from t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 2000
+ ```
+
+ \
+ This command will output something like:\
+
+
+ ```plaintext
+ bafy2bzaceaqzbgiazwvtpago6wpkxl42puxfkvwv5cwjpime2irqatamji2bq
+ ```
+
+
+4. Check the balance of your new `t1snl...` address with `lotus wallet balance`:\
+
+
+ ```shell
+ ./lotus wallet balance
+ ```
+
+ \
+ For example:\
+
+
+ ```shell
+ ./lotus wallet balance t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq
+ ```
+
+ \
+ This command will output something like:\
+
+
+ ```plaintext
+ 2000 FIL
+ ```
+
+If you want to manage your local testnet tokens in MetaMask you will need to create a `t4` address. You can create a `t4` address using `lotus wallet new delegated`. Once you have a `t4` address you can connect MetaMask to your local testnet to see the new balance within the MetaMask extension.
diff --git a/networks/mainnet/README.md b/networks/mainnet/README.md
new file mode 100644
index 000000000..c883971bc
--- /dev/null
+++ b/networks/mainnet/README.md
@@ -0,0 +1,57 @@
+---
+description: >-
+ Mainnet is the primary Filecoin network. Mainnet began on block 148,888. It
+ supports 32 GiB and 64 GiB sectors.
+---
+
+# Mainnet
+
+**Maintainer**: [Protocol Labs](https://protocol.ai)
+
+## Genesis
+
+* CAR File: `QmavMCf95w2UMYGD1J5GpHcWBWXR2jTFYmtAkgeroMmpk1`
+* Reset Timestamp: `2020-08-24T22:00:00Z`
+* Genesis Block CID: `bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2`
+* SHA-1 Digest: `4782cb42b4b01793b5cd9f593cc3dc87b6d3c7b4`
+
+## Network parameters
+
+* Supported Sector Sizes: `32 GiB` and `64 GiB`
+* Consensus Miner Min Power: `10 TiB`
+* Epoch Duration Seconds: `30`
+* Expected Leaders per Epoch: `5`
+* WindowPoSt Proving Period: `2880`
+* WindowPoSt Challenge Window: `60`
+* WindowPoSt Period Deadlines: `48`
+* Pre-Commit Challenge Delay: `150`
+
+## Bootstrap peers
+
+```plaintext
+/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj
+/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc
+/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4
+/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R
+/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc
+/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH
+/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
+/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
+/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
+/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
+/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
+/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
+/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
+/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
+/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
+```
+
+## Resources
+
+* [Latest lightweight snapshot](https://snapshots.mainnet.filops.net/minimal/latest) generated with [Lotus](https://lotus.filecoin.io/) by [Protocol Labs](https://protocol.ai/)
+* [Latest lightweight snapshot](https://forest.chainsafe.io/mainnet/snapshot-latest) generated with [Forest](http://github.com/ChainSafe/forest) by [ChainSafe](https://chainsafe.io/)
+* [Status page and incidents](https://filecoin.statuspage.io/)
+* [Stats dashboard](https://stats.filecoin.io/)
+* [Slack Channel for Updates: #fil-network-announcements](https://filecoinproject.slack.com/archives/C01AC6999KQ)
+* [Slack Channel for Questions: #fil-help](https://filecoinproject.slack.com/archives/CEGN061C5)
+* [Message and CID search: Glif Explorer](https://explorer.glif.io/)
diff --git a/networks/mainnet/explorers.md b/networks/mainnet/explorers.md
new file mode 100644
index 000000000..4eaf74b30
--- /dev/null
+++ b/networks/mainnet/explorers.md
@@ -0,0 +1,95 @@
+---
+description: >-
+ A block explorer is a tool that allows users to view and search the contents
+ of blocks on a blockchain. This page covers available explorers for the
+ Filecoin mainnet.
+---
+
+# Explorers
+
+## Account, actor & message explorers
+
+These explorers allow you to view the states of Filecoin accounts and actors (smart contracts) and the details of messages sent across the network.
+
+### Glif Explorer
+
+Website: [explorer.glif.io](https://explorer.glif.io)
+
+## Block explorers
+
+Block explorers allow you to view the details of the Filecoin network on a single website. Individual block explorers contain different features that may be useful. None of these sites are created or maintained by Protocol Labs or the Filecoin Foundation. This list is in alphabetical order:
+
+### Beryx
+
+Website: [beryx.zondax.ch](https://beryx.zondax.ch/)
+
+
+
+### Filfox
+
+Website: [filfox.io](https://filfox.io)
+
+
+
+### Filexplore
+
+Website: [explorer.filmine.io](https://explorer.filmine.io/)
+
+### Filscan
+
+Website: [filscan.io](https://filscan.io)
+
+
+
+### Filscout
+
+Website: [filscout.io](https://filscout.io)
+
+
+
+### Starboard
+
+Website: [fvm.starboard.ventures](https://fvm.starboard.ventures/)
+
+## API requests
+
+If you have access to a Filecoin node, you can send a JSON-RPC request to get block information.
+
+To get the head tipset:
+
+```shell
+curl --location --request POST 'https://api.node.glif.io/rpc/v1' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainHead",
+ "params": null,
+ "id":1
+}' | jq
+```
+
+Print a block:
+
+```shell
+curl --location --request POST 'https://api.node.glif.io/rpc/v1' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainGetBlock",
+ "params": [{"/": "bafy2bzacebpgpa7elb5uwqxbiljlzgbmpenv4yw2e3gpcjk7ssxv2ijv3ecv2"}],
+ "id":1
+}' | jq
+```
+
+Print message information:
+
+```shell
+curl --location --request POST 'https://api.node.glif.io/rpc/v1' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainGetMessage",
+ "params": [{"/": "bafy2bzacebpgpa7elb5uwqxbiljlzgbmpenv4yw2e3gpcjk7ssxv2ijv3ecv2"}],
+ "id":1
+}' | jq
+```
diff --git a/networks/mainnet/network-performance.md b/networks/mainnet/network-performance.md
new file mode 100644
index 000000000..7a0ab8fde
--- /dev/null
+++ b/networks/mainnet/network-performance.md
@@ -0,0 +1,41 @@
+---
+description: >-
+ You can use these heuristics to understand general Filecoin network
+ performance and how it fits your use case.
+---
+
+# Network performance
+
+The Filecoin Network is a decentralized storage market and network that provides data persistence via a decentralized protocol and publicly verifiable storage proofs on a blockchain. Current Filecoin network performance is primarily determined by security parameters and Filecoin’s [proof constructions](https://spec.filecoin.io/#algorithms\_\_pos).
+
+It is highly non-trivial to provide reliable benchmarks for Filecoin network performance. However, as you begin interacting with Filecoin, you can use these heuristics to understand general Filecoin network performance and how it fits your use case.
+
+## Financial transfers
+
+A message that requires [transferring FIL](https://lotus.filecoin.io/docs/set-up/manage-fil/) is often extremely fast, and will take on average \~1 blocktime (or around 30 seconds) to be reflected on-chain. We consider 120 blocks (1 hour) a conservative number of confirmations for high-value transfers.
+
+## Data storage
+
+In the Filecoin data storage protocol, the following occurs once a deal is proposed and accepted:
+
+1. _Funding the storage market actor_: This process takes roughly 1-2 minutes and ensures that both the client and the storage provider have funds and collateral to pay for the deal.
+2. _Data transfer_: This portion of the deal flow involves the client’s node sending the relevant data to the providing node. The data transfer rate varies widely, depending on the client and the storage provider’s network and disk bandwidths. Generally, the network speed between client and storage provider is the limiting factor in transfer rate.
+3. _Deal shows up on-chain_: Once the data is received, the storage provider verifies that it matches the deal parameters. Then, the provider publishes the deal on the chain.
+4. _Sector sealing_: Once the deal shows up on-chain, the storage provider must still generate Proof-of-Replication and seal the sector. This process is currently estimated to take roughly 1.5 hours for a 32 GB sector on a machine that meets the minimum hardware requirements for storage providers.
+
+For the majority of clients, the most important metric is the time from deal acceptance to deal appearance on-chain. This metric is the sum of the time required to complete steps (1) through (3), described above. Based on current high-level benchmarks, these steps are estimated to take roughly 5-10 minutes for a 1 MiB file.
+
+## Data retrieval
+
+There are two methods by which one can directly retrieve data from the Filecoin network:
+
+* **Fast retrieval**: By default, some Filecoin clients, like lotus, enable storage providers to store an unsealed copy of the stored data in addition to a sealed copy. The sealed copy is necessary for the ongoing storage proofs that a storage provider must submit, while the unsealed copy can be used for quicker retrievals of the data from the storage provider. While this is a valuable feature, there is no guarantee that all storage providers are storing extra unsealed copies of the stored data, as this is not a verifiable part of the protocol. In lotus, this feature is called _fast-retrieval_.
+* **Retrieval after unsealing**: Because of the Filecoin protocol’s design, storage providers are essentially cryptographically guaranteed to store client data in its sealed format. Therefore, if the storage provider doesn’t have an unsealed copy of the data stored, they will have to unseal the sealed data first (i.e., decoding the encoded data) and then serve it back up to the requester (i.e., the retrieval client).
+
+In both methods, the data retrieval process after a retrieval deal is accepted includes:
+
+1. **Funding the payment channel for retrieval**: Similar to the storage deal payment channel funding above, except for data retrieval. The timing estimates for payment channel creation and funding are roughly the same as noted above.
+2. **Unsealing (if needed)**: The storage provider unseals (decodes) the data so that the requester can read it. Sealing and unsealing are symmetric processes, which means they take roughly the same amount of time in either direction. Thus, the unsealing step is estimated to take around as long as the sealing step listed above, or around \~3 hours for a 32 GiB sector on a machine running minimum hardware requirements.
+3. **Data transfer**: The storage provider begins transferring the data back to the data requester. This is also transferred back at a rate similar to the original data transfer rate, which depends on several factors.
+
+Because of the various steps involved in the data retrieval process, Filecoin storage currently meets similar performance bars as traditional _warm_ or _cold_ storage. To get performance that is similar to other hot storage solutions, most users utilize Filecoin with a caching layer such as IPFS. These hybrid and multi-tiered storage solutions use IPFS for hot storage and Filecoin for affordable, frequent, and versioned backups. Some example hybrid storage products include [Powergate](https://blog.textile.io/filecoin-developer-tools-concepts/) and [Textile Buckets](https://docs.textile.io/buckets/).
diff --git a/networks/mainnet/rpcs.md b/networks/mainnet/rpcs.md
new file mode 100644
index 000000000..66bb58f6d
--- /dev/null
+++ b/networks/mainnet/rpcs.md
@@ -0,0 +1,48 @@
+---
+description: Public RPC endpoints are available for the Filecoin mainnet.
+---
+
+# RPCs
+
+{% hint style="info" %}
+[Chainlist](https://chainlist.org/chain/314159) contains a dynamically updated list of available RPCs along with stats like current block-height and latency. [Find out more at chainlist.org](https://chainlist.org/chain/314159)
+{% endhint %}
+
+## [Ankr](https://ankr.com)
+
+* HTTPS: `https://rpc.ankr.com/filecoin`
+* [Supported Filecoin API methods](https://www.ankr.com/docs/rpc-service/chains/chains-list/#filecoin)
+
+## [Chainstack](https://www.chainstack.com)
+
+* HTTPS: `https://filecoin-mainnet.chainstacklabs.com/rpc/v1`
+* WebSocket: `wss://ws-filecoin-mainnet.chainstacklabs.com/rpc/v1`
+* [Chainstack documentation](https://docs.chainstack.com)
+
+## [ChainupCloud](https://cloud.chainup.com)
+
+* HTTPS: `https://filecoin.chainup.net/rpc/v1`
+* WebSocket: `wss://filecoin.chainup.net/rpc/v1`
+* [ChainupCloud documentation](https://docs.chainupcloud.com/blockchain-api/filecoin/public-apis)
+
+## [Glif](https://glif.io)
+
+Please note that publicly available hosted endpoints **only guarantee 2000 of the latest blocks.**
+
+* HTTPS: `https://api.node.glif.io/rpc/v1`
+* WebSocket: `wss://wss.node.glif.io/apigw/lotus/rpc/v1`
+* Lotus lite-node command:
+
+ ```shell
+ FULLNODE_API_INFO=wss://wss.node.glif.io/apigw/lotus lotus daemon --lite
+ ```
+
+ When using a lite-node, omit `/rpc/v1` from Glif’s WebSocket address.
+* [Glif documentation](https://hosting.glif.io/)
+
+## [NOWNodes](https://nownodes.io/)
+
+These nodes are available for free to users for the [first month](https://nownodes.io/pricing), after which the user will have to pay to maintain access.
+
+* HTTPS: `https://fil.nownodes.io`
+* [NOWNodes documentation](https://documenter.getpostman.com/view/13630829/TVmFkLwy)
diff --git a/networks/networks.md b/networks/networks.md
new file mode 100644
index 000000000..9fafe0f22
--- /dev/null
+++ b/networks/networks.md
@@ -0,0 +1,2 @@
+# Networks
+
diff --git a/networks/spacenet/README.md b/networks/spacenet/README.md
new file mode 100644
index 000000000..bb868ded5
--- /dev/null
+++ b/networks/spacenet/README.md
@@ -0,0 +1,42 @@
+---
+description: >-
+ Spacenet is a Filecoin testnet modified to support Interplanetary Consensus
+ (IPC).
+---
+
+# Spacenet
+
+**Maintainer**: [ConsensusLab](https://consensuslab.world)
+
+Spacenet is not _just_ another Filecoin testnet. It has been modified to run the high-performance [Trantor BFT consensus](https://github.com/filecoin-project/mir/tree/main/pkg/trantor), implemented atop [Mir](https://github.com/filecoin-project/mir), a framework for implementing distributed protocols. And did we forget to mention Spacenet comes with built-in [Interplanetary Consensus (IPC)](../../basics/interplanetary-consensus/) support?
+
+Spacenet aims to provide developers with a testbed to deploy decentralized applications that can benefit from IPC’s capabilities, particularly the ability to deploy and transact in subnets.
+
+To learn more about what you can do on Spacenet, visit the [Interplanetary Consensus](../../basics/interplanetary-consensus/) section. For instructions on working with Spacenet, visit the [Spacenet repository](https://github.com/consensus-shipyard/spacenet).
+
+## Genesis
+
+* [Spacenet Genesis](https://github.com/consensus-shipyard/lotus/blob/spacenet/build/genesis/spacenet.car)
+
+## Network parameters
+
+* Supported Sector Sizes: `2 KiB` and `8 MiB`
+* Consensus Miner Min Power: `2 KiB`
+* Epoch Duration Seconds: `15`
+* WindowPoSt Proving Period: `2880`
+* WindowPoSt Challenge Window: `60`
+* WindowPoSt Period Deadlines: `48`
+* Pre-Commit Challenge Delay: `10`
+
+## Bootstrap peers
+
+```plaintext
+/dns4/bootstrap-1.spacenet.ipc.space/tcp/1347/p2p/12D3KooWBgvwdJfJzi33n3RtesHdXvW16pGqaVgzD2WCijxvwEp1
+/dns4/bootstrap-2.spacenet.ipc.space/tcp/1347/p2p/12D3KooW9u5RNjcw5zbkZcWGo2WWwjEbvr1Qz7sTs9GpxNw5xNzC
+```
+
+## Resources
+
+* [Spacenet repository](https://github.com/consensus-shipyard/spacenet)
+* [Spacenet status page](https://spacenet.statuspage.io/)
+* [Contact form](https://docs.google.com/forms/d/1O3\_kHb2WJhil9sqXOxgGGGsqkAA61J1rKMfnb5os5yo/edit)
diff --git a/networks/spacenet/get-test-tokens.md b/networks/spacenet/get-test-tokens.md
new file mode 100644
index 000000000..81c38a9ca
--- /dev/null
+++ b/networks/spacenet/get-test-tokens.md
@@ -0,0 +1,23 @@
+---
+description: >-
+ Test funds are available to developers so that they can test their smart
+ contracts and applications within the confines of a test network. This page
+ covers how to get test funds on Spacenet.
+---
+
+# Get test tokens
+
+A faucet is available at [faucet.spacenet.ipc.space](https://faucet.spacenet.ipc.space) for developers to get test Filecoin (tFIL) on the Spacenet testnet. The faucet has the following restrictions:
+
+* The per-request allowance given by the faucet is 10 tFIL.
+* There is a daily maximum of 20 tFIL per address.
+* And we have also limited the maximum amount of funds that can be withdrawn daily.
+
+
+
+## Request more funds
+
+If you need more Spacenet tFIL for your application, feel free to drop us a message at one of the following locations:
+
+* [`#spacenet` channel in Filecoin Slack](https://filecoinproject.slack.com/archives/C043ZAHPFKL)
+* [ipc@protocol.ai](mailto:ipc@protocol.ai)
diff --git a/networks/spacenet/rpcs.md b/networks/spacenet/rpcs.md
new file mode 100644
index 000000000..c03df4e1b
--- /dev/null
+++ b/networks/spacenet/rpcs.md
@@ -0,0 +1,23 @@
+---
+description: Public RPC endpoints are available for the Spacenet testnet.
+---
+
+# RPCs
+
+## [Glif](https://glif.io)
+
+* HTTPS: `https://api.spacenet.node.glif.io/` (defaults to /rpc/v1)
+ * `https://api.spacenet.node.glif.io/rpc/v0`
+ * `https://api.spacenet.node.glif.io/rpc/v1`
+
+## Basic
+
+This endpoint is currently limited to read-only [JSON RPC API calls](../../reference/json-rpc/).
+
+* HTTP: `http://api.spacenet.ipc.space:1234/rpc/v1`
+* Eudico lite-node command:\
+
+
+ ```shell
+ FULLNODE_API_INFO=/dns4/api.spacenet.ipc.space/tcp/1234/http ./eudico mir daemon --lite
+ ```
diff --git a/nodes/full-nodes/README.md b/nodes/full-nodes/README.md
new file mode 100644
index 000000000..da8b3dcbb
--- /dev/null
+++ b/nodes/full-nodes/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+  This section contains information on how to spin up a full Filecoin node using
+ Lotus, and options for using remote nodes.
+---
+
+# Full-nodes
+
diff --git a/nodes/full-nodes/basic-setup.md b/nodes/full-nodes/basic-setup.md
new file mode 100644
index 000000000..9ed7d37cf
--- /dev/null
+++ b/nodes/full-nodes/basic-setup.md
@@ -0,0 +1,39 @@
+---
+description: >-
+ This page gives a very basic overview of how to install Lotus on your
+ computer.
+---
+
+# Basic setup
+
+To install Lotus on your computer, follow these steps:
+
+1. First, you need to download the appropriate binary file for your operating system. Go to the [official Lotus GitHub repository](https://github.com/filecoin-project/lotus) and select the latest release that is compatible with your system. You can choose from Windows, macOS, and Linux distributions.
+2. Once you have downloaded the binary file, extract the contents to a directory of your choice. For example, if you are using Linux, you can extract the contents to the `/usr/local/bin` directory by running the command:
+
+```sh
+sudo tar -C /usr/local/bin -xzf lotus-X.X.X-linux-amd64.tar.gz
+```
+
+3. Replace `X.X.X` with the version number of the release you downloaded.
+4. After extracting the contents, navigate to the `lotus` directory in your terminal. For example, if you extracted the contents to `/usr/local/bin`, you can navigate to the lotus directory by running the command:
+
+```sh
+cd /usr/local/bin/lotus-X.X.X
+```
+
+5. Again, replace `X.X.X` with the version number of the release you downloaded.
+6. Run the `lotus` binary file to start the Lotus daemon. You can do this by running the command:
+
+```sh
+./lotus daemon
+```
+
+7. This will start the Lotus daemon, which will connect to the Filecoin network and start synchronizing with other nodes on the network.
+8. Optionally, you can also run the lotus-miner binary file if you want to participate in the Filecoin mining process. You can do this by running the command:
+
+```sh
+./lotus-miner run
+```
+
+9. This will start the Lotus miner, which will use your computer’s computing power to mine new blocks on the Filecoin network.
diff --git a/nodes/full-nodes/node-providers.md b/nodes/full-nodes/node-providers.md
new file mode 100644
index 000000000..6b379e017
--- /dev/null
+++ b/nodes/full-nodes/node-providers.md
@@ -0,0 +1,25 @@
+---
+description: >-
+  A node provider, sometimes specifically called a remote node provider, is a
+  service that offers access to remote nodes on the Filecoin network.
+---
+
+# Node providers
+
+Nodes are essential components of the Filecoin network. They maintain copies of the blockchain’s entire transaction history and verify the validity of new transactions and blocks. Running a node requires significant computational resources and storage capacity, which can be demanding for individual developers or teams.
+
+### Benefits
+
+Remote node providers address this challenge by hosting and maintaining Filecoin nodes on behalf of their clients. By utilizing a remote node provider, developers can access blockchain data, submit transactions, and query the network without the need to synchronize the entire blockchain or manage the infrastructure themselves. This offers convenience and scalability, particularly for applications or services that require frequent and real-time access to blockchain data.
+
+Remote node providers typically offer APIs or other communication protocols to facilitate seamless integration with their hosted nodes. These APIs allow developers to interact with the Filecoin network, retrieve data, and execute transactions programmatically.
+
+### Potential drawbacks
+
+It’s important to note that when using a remote node provider, developers are relying on the provider’s infrastructure and trustworthiness. You should carefully choose a reliable and secure provider to ensure the integrity and privacy of their interactions with the blockchain network.
+
+Node providers often limit the specifications of the nodes that they offer. Some developers may need particularly speedy nodes or nodes that contain the entire history of the blockchain (which can be incredibly expensive to store).
+
+### Node providers
+
+There are multiple node providers for the Filecoin mainnet and each of the testnets. Check out the [Networks section](https://docs.filecoin.io/networks/) for details.
diff --git a/nodes/full-nodes/pre-requisites.md b/nodes/full-nodes/pre-requisites.md
new file mode 100644
index 000000000..db42eab04
--- /dev/null
+++ b/nodes/full-nodes/pre-requisites.md
@@ -0,0 +1,21 @@
+---
+description: >-
+  This page provides details on Lotus installation prerequisites and supported
+ platforms.
+---
+
+# Pre-requisites
+
+Before installing Lotus on your computer, you will need to meet the following prerequisites:
+
+* **Operating system**: Lotus is compatible with Windows, macOS, and various Linux distributions. Ensure that your operating system is compatible with the version of Lotus you intend to install.
+* **CPU architecture**: Lotus is compatible with 64-bit CPU architectures. Ensure that your computer has a 64-bit CPU.
+* **Memory**: Lotus requires at least 8GB of RAM to run efficiently.
+* **Storage**: Lotus requires several GB of free disk space for the blockchain data, as well as additional space for the Lotus binaries and other files.
+* **Internet connection**: Lotus requires a stable and high-speed internet connection to synchronize with the Filecoin network and communicate with other nodes.
+* **Firewall and port forwarding**: Ensure that your firewall settings and port forwarding rules allow incoming and outgoing traffic on the ports used by Lotus.
+* **Command-line interface**: Lotus is primarily operated through the command line interface. Ensure that you have a basic understanding of command-line usage and are comfortable working in a terminal environment.
+
+## Lotus documentation
+
+To get more information, check out the official [Lotus documentation](https://lotus.filecoin.io/lotus/install/prerequisites/).
diff --git a/nodes/implementations/README.md b/nodes/implementations/README.md
new file mode 100644
index 000000000..342a4a244
--- /dev/null
+++ b/nodes/implementations/README.md
@@ -0,0 +1,63 @@
+---
+description: >-
+ Nodes are participants that contribute to the network’s operation and maintain
+ its integrity. There are two major node implementations running on the
+ Filecoin network today, with more in the works.
+---
+
+# Implementations
+
+## Lotus
+
+
+
+Lotus is the reference implementation of the Filecoin protocol, developed by Protocol Labs, the organization behind Filecoin. Lotus is a full-featured implementation of the Filecoin network, including the storage, retrieval, and mining functionalities. It is written in Go and is designed to be modular, extensible, and highly scalable.
+
+[Learn more about Lotus](lotus.md)
+
+## Venus
+
+
+
+Venus is an open-source implementation of the Filecoin network, developed by IPFSForce. The project is built in Go and is designed to be fast, efficient, and scalable.
+
+Venus is a full-featured implementation of the Filecoin protocol, providing storage, retrieval, and mining functionalities. It is compatible with the Lotus implementation and can interoperate with other Filecoin nodes on the network.
+
+One of the key features of Venus is its support for the Chinese language and market. Venus provides a Chinese language user interface and documentation, making it easier for Chinese users to participate in the Filecoin network.
+
+[Learn more about Venus](venus.md)
+
+## Implementation differences
+
+While Lotus and Venus share many similarities, they differ in their development, language, feature sets, focus, and community support. Depending on your needs and interests, you may prefer one implementation over the other:
+
+### Language
+
+Both Lotus and Venus are written in Go.
+
+### Compatibility
+
+Both Lotus and Venus are fully compatible with the Filecoin network and can interoperate with other Filecoin nodes on the network.
+
+### Features
+
+While both implementations provide storage, retrieval, and mining functionalities, they differ in their feature sets. Lotus includes features such as a decentralized storage market, a retrieval market, and a built-in consensus mechanism, while Venus includes features such as automatic fault tolerance, intelligent storage allocation, and decentralized data distribution.
+
+### Focus
+
+Lotus has a more global focus, while Venus has a stronger focus on the Chinese market. Venus provides a Chinese language user interface and documentation, making it easier for Chinese users to participate in the Filecoin network.
+
+## Other implementations
+
+The following implementations exist, but aren’t fully featured.
+
+### Forest
+
+
+
+Forest is an implementation of Filecoin written in Rust. It is currently in beta. The implementation will take a modular approach to building a full Filecoin node in two parts:
+
+* Building Filecoin’s security critical systems in Rust from the Filecoin Protocol Specification, specifically the virtual machine, blockchain, and node system,
+* Integrating functional components for storage mining and storage & retrieval markets to compose a fully functional Filecoin node implementation.
+
+You can find the [Forest codebase on GitHub](https://github.com/ChainSafe/forest) and the documentation site at [`chainsafe.github.io/forest`](https://chainsafe.github.io/forest/).
diff --git a/nodes/implementations/lotus.md b/nodes/implementations/lotus.md
new file mode 100644
index 000000000..9c2b9ce8f
--- /dev/null
+++ b/nodes/implementations/lotus.md
@@ -0,0 +1,36 @@
+---
+description: >-
+ Lotus is a full-featured implementation of the Filecoin network, including the
+ storage, retrieval, and mining functionalities. It is the reference
+ implementation of the Filecoin protocol.
+---
+
+# Lotus
+
+## Interact with Lotus
+
+There are many ways to interact with a Lotus node, depending on your specific needs and interests. By leveraging the powerful tools and APIs provided by Lotus, you can build custom applications, extend the functionality of the network, and contribute to the ongoing development of the Filecoin ecosystem.
+
+### Lotus API
+
+Lotus provides a comprehensive API that allows developers to interact with the Filecoin network programmatically. The API includes methods for performing various operations such as storing and retrieving data, mining blocks, and transferring FIL tokens. You can use the API to build custom applications or integrate Filecoin functionality into your existing applications.
+
+### Lotus CLI
+
+Lotus includes a powerful command-line interface that allows developers to interact with the Filecoin network from the terminal. You can use the CLI to perform various operations such as creating wallets, sending FIL transactions, and querying the network. The CLI is a quick and easy way to interact with the network and is particularly useful for testing and development purposes.
+
+### Custom plugin
+
+Lotus is designed to be modular and extensible, allowing developers to create custom plugins that add new functionality to the network. You can develop plugins that provide custom storage or retrieval mechanisms, implement new consensus algorithms, or add support for new network protocols.
+
+### Source contributions
+
+If you are interested in contributing to the development of Lotus itself, you can do so by contributing to the open-source codebase on GitHub. You can submit bug reports, suggest new features, or submit code changes to improve the functionality, security, or performance of the network.
+
+## Hosted nodes
+
+Many hosting services provide access to Lotus nodes on the Filecoin network. Check out the [RPC section for more information](../../networks/mainnet/rpcs.md).
+
+## More information
+
+For more information about Lotus, including advanced configuration, check out the Lotus documentation site [lotus.filecoin.io](https://lotus.filecoin.io).
diff --git a/nodes/implementations/venus.md b/nodes/implementations/venus.md
new file mode 100644
index 000000000..19a815afa
--- /dev/null
+++ b/nodes/implementations/venus.md
@@ -0,0 +1,34 @@
+---
+description: >-
+ Venus is an open-source implementation of the Filecoin network, developed by
+ the blockchain company IPFSForce. Venus is built in Go and is designed to be
+ fast, efficient, and scalable.
+---
+
+# Venus
+
+Venus is a full-featured implementation of the Filecoin protocol, providing storage, retrieval, and mining functionalities. It is compatible with the Lotus implementation and can interoperate with other Filecoin nodes on the network.
+
+One of the key features of Venus is its support for the Chinese language and market. Venus provides a Chinese language user interface and documentation, making it easier for Chinese users to participate in the Filecoin network.
+
+Venus also includes several advanced features, such as automatic fault tolerance, intelligent storage allocation, and decentralized data distribution. These features are designed to improve the reliability and efficiency of the storage and retrieval processes on the Filecoin network.
+
+## Interact with Venus
+
+Here are some of the most common ways to interact with Venus:
+
+### Venus API
+
+Venus provides a comprehensive API that allows developers to interact with the Filecoin network programmatically. The API includes methods for performing various operations such as storing and retrieving data, mining blocks, and transferring FIL tokens. You can use the API to build custom applications or integrate Filecoin functionality into your existing applications.
+
+### Command-line interface
+
+Venus includes a powerful command-line interface that allows developers to interact with the Filecoin network from the terminal. You can use the CLI to perform various operations such as creating wallets, sending FIL transactions, and querying the network. The CLI is a quick and easy way to interact with the network and is particularly useful for testing and development purposes.
+
+### Contribute to source
+
+If you are interested in contributing to the development of Venus itself, you can do so by contributing to the open-source codebase on GitHub. You can submit bug reports, suggest new features, or submit code changes to improve the functionality, security, or performance of the network.
+
+## More information
+
+For more information about Venus, including advanced configuration, see the [Venus documentation site](https://venus.filecoin.io).
diff --git a/nodes/lite-nodes/README.md b/nodes/lite-nodes/README.md
new file mode 100644
index 000000000..7463a0285
--- /dev/null
+++ b/nodes/lite-nodes/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers what lite-nodes are, and how developers can use them to
+ interact with the Filecoin network.
+---
+
+# Lite-nodes
+
diff --git a/nodes/lite-nodes/spin-up-a-lite-node.md b/nodes/lite-nodes/spin-up-a-lite-node.md
new file mode 100644
index 000000000..bfc49cab7
--- /dev/null
+++ b/nodes/lite-nodes/spin-up-a-lite-node.md
@@ -0,0 +1,421 @@
+---
+description: >-
+ Lite-nodes are a simplified node option that allow developers to perform
+ lightweight tasks on a local node. This page covers how to spin-up a lite node
+ on your local machine.
+---
+
+# Spin up a lite-node
+
+In this guide, we’re going to use the [Lotus](../implementations/lotus.md) Filecoin implementation. We’ll show how to install a lite-node on MacOS and Ubuntu. For other Linux distributions, check out the [Lotus documentation](https://lotus.filecoin.io/lotus/install/linux/#building-from-source). To run a lite-node on Windows, install [WSL with Ubuntu](https://ubuntu.com/tutorials/install-ubuntu-on-wsl2-on-windows-10#1-overview) on your system and follow the _Ubuntu_ instructions below.
+
+## Prerequisites
+
+Lite-nodes have relatively lightweight hardware requirements – it’s possible to run a lite-node on a Raspberry Pi 4. Your machine should meet the following hardware requirements:
+
+1. At least 2 GiB of RAM
+2. A dual-core CPU.
+
+To build the lite-node, you’ll need some specific software. Run the following command to install the software prerequisites:
+
+{% tabs %}
+{% tab title="MacOS" %}
+1. Ensure you have [XCode](https://developer.apple.com/xcode/) and [Homebrew](https://brew.sh/) installed.
+2. Install the following dependencies:
+
+ ```sh
+ brew install go bzr jq pkg-config hwloc coreutils
+ ```
+
+3. Install Rust and source the `~/.cargo/env` config file:
+
+ ```sh
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+ source "$HOME/.cargo/env"
+ ```
+
+{% endtab %}
+{% tab title="Ubuntu" %}
+1. Install the following dependencies:
+
+ ```sh
+ sudo apt update -y
+ sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y
+ ```
+
+2. Install Go and add `/usr/local/go/bin` to your `$PATH` variable:
+
+ ```sh
+ wget -c https://golang.org/dl/go1.18.8.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+ echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc && source ~/.bashrc
+ ```
+
+3. Install Rust and source the `~/.cargo/env` config file:
+
+ ```sh
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+ source "$HOME/.cargo/env"
+ ```
+
+{% endtab %}
+{% endtabs %}
+
+## Pre-build
+
+Before we can build the Lotus binaries, there’s some setup we need to do. MacOS users should select their CPU architecture from the tabs:
+
+{% tabs %}
+{% tab title="MacOS Intel" %}
+1. Clone the repository, and move into the `lotus` directory:
+
+ ```sh
+ git clone https://github.com/filecoin-project/lotus.git
+ cd lotus/
+ ```
+
+2. Switch to the branch representing the network you want to use. Mainnet always uses the `releases` branch:
+
+ ```sh
+ git checkout releases
+ ```
+
+ Or you can checkout to the Calibration testnet release using the `ntwk/calibration` branch:
+
+ ```sh
+ git checkout ntwk/calibration
+ ```
+
+3. Done! You can move on to the [Build](https://docs.filecoin.io/nodes/lite-nodes/spin-up-a-lite-node/#build-the-binary) section.
+{% endtab %}
+{% tab title="MacOS ARM" %}
+1. Clone the repository, and move into the `lotus` directory:
+
+ ```sh
+ git clone https://github.com/filecoin-project/lotus.git
+ cd lotus
+ ```
+
+2. Switch to the branch representing the network you want to use. Mainnet always uses the `releases` branch:
+
+ ```sh
+ git checkout releases
+ ```
+
+ Or you can checkout to the Calibration testnet release using the `ntwk/calibration` branch:
+
+ ```sh
+ git checkout ntwk/calibration
+ ```
+
+3. Create the necessary environment variables to allow Lotus to run on M1 architecture:
+
+ ```bash
+ export LIBRARY_PATH=/opt/homebrew/lib
+ export FFI_BUILD_FROM_SOURCE=1
+ export PATH="$(brew --prefix coreutils)/libexec/gnubin:/usr/local/bin:$PATH"
+ ```
+
+4. Done! You can move on to the [Build](https://docs.filecoin.io/nodes/lite-nodes/spin-up-a-lite-node/#build-the-binary) section.
+{% endtab %}
+{% tab title="Ubuntu" %}
+1. Clone the repository, and move into the `lotus` directory:
+
+ ```sh
+ git clone https://github.com/filecoin-project/lotus.git
+ cd lotus
+ ```
+
+2. Switch to the branch representing the network you want to use. Mainnet always uses the `releases` branch:
+
+ ```sh
+ git checkout releases
+ ```
+
+ Or you can checkout to the Calibration testnet release using the `ntwk/calibration` branch:
+
+ ```sh
+ git checkout ntwk/calibration
+ ```
+
+3. If your processor was released later than an AMD Zen or Intel Ice Lake CPU, enable the use of SHA extensions by adding these two environment variables. If in doubt, ignore this command and move on to [the next section](https://docs.filecoin.io/nodes/lite-nodes/spin-up-a-lite-node/#build-the-binary).
+
+ ```sh
+ export RUSTFLAGS="-C target-cpu=native -g"
+ export FFI_BUILD_FROM_SOURCE=1
+ ```
+
+4. Done! You can move on to the Build section.
+{% endtab %}
+{% endtabs %}
+
+## Build the binary
+
+The last thing we need to do to get our node setup is to build the package. The command you need to run depends on which network you want to connect to:
+
+{% tabs %}
+{% tab title="Mainnet" %}
+1. Remove or delete any existing Lotus configuration files on your system:
+
+ ```shell
+ mv ~/.lotus ~/.lotus-backup
+ ```
+
+2. Make the Lotus binaries and install them:
+
+ ```shell
+ make clean all
+ sudo make install
+ ```
+
+3. Once the installation finishes, query the Lotus version to ensure everything is installed successfully and for the correct network:
+
+ ```shell
+ lotus --version
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ lotus version 1.19.1-dev+mainnet+git.94b621dd5
+ ```
+{% endtab %}
+{% tab title="Calibration" %}
+1. Remove or delete any existing Lotus configuration files on your system:
+
+ ```shell
+ mv ~/.lotus ~/.lotus-backup
+ ```
+
+2. Make the Lotus binaries and install them:
+
+ ```shell
+ make clean && make calibrationnet
+ sudo make install
+ ```
+
+3. Once the installation finishes, query the Lotus version to ensure everything is installed successfully and for the correct network:
+
+ ```shell
+ lotus --version
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ lotus version 1.19.1-dev+calibrationnet+git.94b621dd5.dirty
+ ```
+
+{% endtab %}
+{% endtabs %}
+
+## Start the node
+
+Let's start the lite-node by connecting to a remote full-node. We can use the public full-nodes from [https://www.glif.io](https://www.glif.io):
+
+{% tabs %}
+{% tab title="Mainnet" %}
+1. Create an environment variable called `FULLNODE_API_INFO` and set it to the WebSockets address of the node you want to connect to. At the same time, start the Lotus daemon with the `--lite` tag:
+
+ ```shell
+ FULLNODE_API_INFO=wss://wss.mainnet.node.glif.io/apigw/lotus lotus daemon --lite
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ 2023-01-26T11:18:54.251-0400 INFO main lotus/daemon.go:219 lotus repo: /Users/johnny/.lotus
+ 2023-01-26T11:18:54.254-0400 WARN cliutil util/apiinfo.go:94 API Token not set and requested, capabilities might be limited.
+ ...
+ ```
+
+2. The Lotus daemon will continue to run in this terminal window. All subsequent commands we use should be done in a separate terminal window.
+{% endtab %}
+{% tab title="Calibration" %}
+1. Create an environment variable called `FULLNODE_API_INFO` and set it to the WebSockets address of the node you want to connect to. At the same time, start the Lotus daemon with the `--lite` tag:
+
+ ```shell
+ FULLNODE_API_INFO=wss://wss.calibration.node.glif.io/apigw/lotus lotus daemon --lite
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ 2023-01-26T11:18:54.251-0400 INFO main lotus/daemon.go:219 lotus repo: /Users/johnny/.lotus
+ 2023-01-26T11:18:54.254-0400 WARN cliutil util/apiinfo.go:94 API Token not set and requested, capabilities might be limited.
+ ...
+ ```
+
+2. The Lotus daemon will continue to run in this terminal window. All subsequent commands we use should be done in a separate terminal window.
+{% endtab %}
+{% endtabs %}
+
+## Expose the API
+
+To send JSON-RPC requests to our lite-node we need to expose the API.
+
+{% tabs %}
+{% tab title="Mainnet" %}
+1. Open `~/.lotus/config.toml` and uncomment `ListenAddress` on line 6:
+
+ ```toml
+ [API]
+ # Binding address for the Lotus API
+ #
+ # type: string
+ # env var: LOTUS_API_LISTENADDRESS
+ ListenAddress = "/ip4/127.0.0.1/tcp/1234/http"
+
+ # type: string
+ # env var: LOTUS_API_REMOTELISTENADDRESS
+ # RemoteListenAddress = ""
+ ...
+ ```
+
+2. Open the terminal window where your lite-node is running and press `CTRL` + `c` to close the daemon.
+3. In the same window, restart the lite-node:
+
+ ```shell
+ FULLNODE_API_INFO=wss://wss.mainnet.node.glif.io/apigw/lotus lotus daemon --lite
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ 2023-01-26T11:18:54.251-0400 INFO main lotus/daemon.go:219 lotus repo: /Users/johnny/.lotus
+ 2023-01-26T11:18:54.254-0400 WARN cliutil util/apiinfo.go:94 API Token not set and requested, capabilities might be limited
+ ...
+ ```
+
+4. The Lotus daemon will continue to run in this terminal window. All subsequent commands we use should be done in a separate terminal window.
+{% endtab %}
+{% tab title="Calibration" %}
+1. Open `~/.lotus/config.toml` and uncomment `ListenAddress` on line 6:
+
+ ```toml
+ [API]
+ # Binding address for the Lotus API
+ #
+ # type: string
+ # env var: LOTUS_API_LISTENADDRESS
+ ListenAddress = "/ip4/127.0.0.1/tcp/1234/http"
+
+ # type: string
+ # env var: LOTUS_API_REMOTELISTENADDRESS
+ # RemoteListenAddress = ""
+
+ ...
+ ```
+
+2. Open the terminal window where your lite-node is running and press `CTRL` + `c` to close the daemon.
+3. In the same window, restart the lite-node:
+
+ ```shell
+ FULLNODE_API_INFO=wss://wss.calibration.node.glif.io/apigw/lotus lotus daemon --lite
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ 2023-01-26T11:18:54.251-0400 INFO main lotus/daemon.go:219 lotus repo: /Users/johnny/.lotus
+ 2023-01-26T11:18:54.254-0400 WARN cliutil util/apiinfo.go:94 API Token not set and requested, capabilities might be limited.
+ ...
+ ```
+
+4. The Lotus daemon will continue to run in this terminal window. All subsequent commands we use should be done in a separate terminal window.
+{% endtab %}
+{% endtabs %}
+
+The lite-node is now set up to accept local JSON-RPC requests! However, we don't have an authorization key, so we won't have access to privileged JSON-RPC methods.
+
+## Create a key
+
+To access privileged JSON-RPC methods, like creating a new wallet, we need to supply an authentication key with our Curl requests.
+
+1. Create a new admin token and set the result to a new `LOTUS_ADMIN_KEY` environment variable:
+
+ ```shell
+ lotus auth create-token --perm "admin"
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.um-LqY7g-SDOsMheDRbQ9JIaFzus_Pan0J88VQ6ZLVE
+ ```
+
+2. Keep this key handy. We're going to use it in the next section.
+
+## Send requests
+
+Let's run a couple of commands to see if the JSON-RPC API is set up correctly.
+
+1. First, let's grab the head of the Filecoin network chain:
+
+ ```shell
+ curl -X POST '127.0.0.1:1234/rpc/v0' \
+ -H 'Content-Type: application/json' \
+ --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.ChainHead","params":[]}' \
+ | jq
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ {
+ "jsonrpc": "2.0",
+ "result": {
+ "Cids": [
+ {
+ "/": "bafy2bzacead2v2y6yob7rkm4y4snthibuamzy5a5iuzlwvy7rynemtkdywfuo"
+ },
+ {
+ "/": "bafy2bzaced4zahevivrcdoefqlh2j45sevfh5g3zsw6whpqxqjig6dxxf3ip6"
+ },
+ ...
+ ```
+
+2. Next, let's try to create a new wallet. Since this is a privileged method, we need to supply our auth key `eyJhbGc...`:
+
+ ```shell
+ curl -X POST '127.0.0.1:1234/rpc/v0' \
+ -H 'Content-Type: application/json' \
+ -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.um-LqY7g-SDOsMheDRbQ9JIaFzus_Pan0J88VQ6ZLVE' \
+ --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.WalletNew","params":["secp256k1"]}' \
+ | jq
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ {
+ "jsonrpc": "2.0",
+ "result": "t1vuc4eu2wgsdnce2ngygyzuxky3aqijqe7gj5qqa",
+ "id": 1
+ }
+ ```
+
+ The result field is the public key for our address. The private key is stored within our lite-node.
+
+3. Set the new address as the default wallet for our lite-node:
+
+ ```shell
+ curl -X POST '127.0.0.1:1234/rpc/v0' \
+ -H 'Content-Type: application/json' \
+ -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.um-LqY7g-SDOsMheDRbQ9JIaFzus_Pan0J88VQ6ZLVE' \
+ --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.WalletSetDefault","params":["t1vuc4eu2wgsdnce2ngygyzuxky3aqijqe7gj5qqa"]}' \
+ | jq
+ ```
+
+ This will output something like:
+
+ ```plaintext
+ {
+ "jsonrpc": "2.0",
+ "id": 1
+ }
+ ```
+
+
+## Next steps
+
+You should now have a local lite-node connected to a remote full-node with an admin API key! You can use this setup to continue playing around with the JSON-RPC, or start building your applications on Filecoin!
diff --git a/nodes/nodes.md b/nodes/nodes.md
new file mode 100644
index 000000000..7ab634452
--- /dev/null
+++ b/nodes/nodes.md
@@ -0,0 +1,2 @@
+# Nodes
+
diff --git a/reference/built-in-actors/README.md b/reference/built-in-actors/README.md
new file mode 100644
index 000000000..d9c5a399d
--- /dev/null
+++ b/reference/built-in-actors/README.md
@@ -0,0 +1,125 @@
+---
+description: >-
+ Built-in actors are how the Filecoin network manages and updates global state.
+  This page contains information on how smart contracts can access built-in
+ actors.
+---
+
+# Built-in actors
+
+Built-in actors are how the Filecoin network manages and updates _global state_. The _global state_ of the network at a given epoch can be thought of as the set of blocks agreed upon via network consensus in that epoch. This global state is represented as a _state tree_, which maps an actor to an _actor state_. An _actor state_ describes the current conditions for an individual actor, such as its FIL balance and its nonce. In Filecoin, actors trigger a _state transition_ by sending a _message_. Each block in the chain can be thought of as a **proposed** global state, where the block selected by network consensus sets the **new** global state. Each block contains a series of messages, and a checkpoint of the current global state after the application of those messages. The Filecoin Virtual Machine (FVM) is the Filecoin network component that is in charge of execution of all actor code.
+
+A basic example of how built-in actors are used in Filecoin is the process by which storage providers prove storage and are subsequently rewarded. The process is as follows:
+
+1. The [`StorageMinerActor`](https://docs.filecoin.io/reference/built-in-actors/overview/#storagemineractor) processes proof of storage from a storage provider.
+2. The storage provider is awarded storage power based on whether the proof is valid or not.
+3. The [`StoragePowerActor`](https://docs.filecoin.io/reference/built-in-actors/overview/#storagepoweractor) accounts for the storage power.
+4. During block validation, the `StoragePowerActor`’s state, which includes information on storage power allocated to each storage provider, is read.
+5. Using the state information, the consensus mechanism randomly awards blocks to the storage providers with the most power, and the [`RewardActor`](https://docs.filecoin.io/reference/built-in-actors/overview/#rewardactor) sends FIL to storage providers.
+
+## Blocks
+
+Each block in the Filecoin chain contains:
+
+* Inline data such as current block height.
+* A pointer (CID) to the current state tree.
+* A pointer (CID) to the set of messages that, when applied to the network, generated the current state tree.
+
+## State tree
+
+A Merkle Directed Acyclic Graph (Merkle DAG) is used to map the state tree and the set of messages. Nodes in the state tree contain information on:
+
+* Actors, like FIL balance, nonce and a pointer (CID) to actor state data.
+* Messages in the current block
+
+## Messages
+
+Like the state tree, a Merkle Directed Acyclic Graph (Merkle DAG) is used to map the set of messages for a given block. Nodes in the messages map contain information on:
+
+* The actor the message was sent to
+* The actor that sent the message
+* Target method to call on actor being sent the message
+* A cryptographic signature for verification
+* The amount of FIL transferred between actors
+
+## Actor code
+
+The code that defines an actor in the Filecoin network is separated into different methods. Messages sent to an actor contain information on which method(s) to call, and the input parameters for those methods. Additionally, actor code interacts with a _runtime_ object, which contains information on the general state of the network, such as the current epoch, and cryptographic signatures and proof validations. Like smart contracts in other blockchains, actors must pay a _gas fee_, which is some predetermined amount of FIL to offset the cost (network resources used, etc.) of a transaction. Every actor has a Filecoin balance attributed to it, a state pointer, a code which tells the system what type of actor it is, and a nonce, which tracks the number of messages sent by this actor.
+
+## Types of built-in actors
+
+The 11 different types of built-in actors are as follows:
+
+* [CronActor](https://docs.filecoin.io/reference/built-in-actors/overview/#cronactor)
+* [InitActor](https://docs.filecoin.io/reference/built-in-actors/overview/#initactor)
+* [AccountActor](https://docs.filecoin.io/reference/built-in-actors/overview/#accountactor)
+* [RewardActor](https://docs.filecoin.io/reference/built-in-actors/overview/#rewardactor)
+* [StorageMarketActor](https://docs.filecoin.io/reference/built-in-actors/overview/#storagemarketactor)
+* [StorageMinerActor](https://docs.filecoin.io/reference/built-in-actors/overview/#storagemineractor)
+* [MultisigActor](https://docs.filecoin.io/reference/built-in-actors/overview/#multisigactor)
+* [PaymentChannelActor](https://docs.filecoin.io/reference/built-in-actors/overview/#paymentchannelactor)
+* [StoragePowerActor](https://docs.filecoin.io/reference/built-in-actors/overview/#storagepoweractor)
+* [VerifiedRegistryActor](https://docs.filecoin.io/reference/built-in-actors/overview/#verifiedregistryactor)
+* [SystemActor](https://docs.filecoin.io/reference/built-in-actors/overview/#systemactor)
+
+### CronActor
+
+The `CronActor` sends messages to the `StoragePowerActor` and `StorageMarketActor` at the end of each epoch. The messages sent by `CronActor` indicate to StoragePowerActor and StorageMarketActor how they should maintain internal state and process deferred events. This system actor is instantiated in the genesis block, and interacts directly with the FVM.
+
+### InitActor
+
+The `InitActor` can initialize new actors on the Filecoin network. This system actor is instantiated in the genesis block, and maintains a table resolving a public key and temporary actor addresses to their canonical ID addresses. The `InitActor` interacts directly with the FVM.
+
+### AccountActor
+
+The `AccountActor` is responsible for user accounts. Account actors are not created by the `InitActor`, but by sending a message to a public-key style address. The account actor updates the state tree with new actor address, and interacts directly with the FVM.
+
+### RewardActor
+
+The `RewardActor` manages unminted Filecoin tokens, and distributes rewards directly to miner actors, where they are locked for vesting. The reward value used for the current epoch is updated at the end of an epoch. The `RewardActor` interacts directly with the FVM.
+
+### StorageMarketActor
+
+The `StorageMarketActor` is responsible for processing and managing on-chain deals. This is also the entry point of all storage deals and data into the system. This actor keeps track of storage deals, and the of locked balances of both the client storing data and the storage provider. When a deal is posted on chain through the `StorageMarketActor`, the actor will first check if both transacting parties have sufficient balances locked up and include the deal on chain. Additionally, the `StorageMarketActor` holds _Storage Deal Collateral_ provided by the storage provider to collateralize deals. This collateral is returned to the storage provider when all deals in the sector successfully conclude. This actor does not interact directly with the FVM.
+
+### StorageMinerActor
+
+The `StorageMinerActor` is created by the `StoragePowerActor`, and is responsible for storage mining operations and the collection of mining proofs. This actor is a key part of the Filecoin storage mining subsystem, which ensures a storage miner can effectively commit storage to the Filecoin network, and handles the following:
+
+* Committing new storage
+* Continuously proving storage
+* Declaring storage faults
+* Recovering from storage faults
+
+This actor does not interact directly with the FVM.
+
+### MultisigActor
+
+The `MultisigActor` is responsible for dealing with operations involving the Filecoin wallet, and represents a group of transaction signers, with a maximum of 256. Signers may be external users or the `MultisigActor` itself. This actor does not interact directly with the FVM.
+
+### PaymentChannelActor
+
+The `PaymentChannelActor` creates and manages _payment channels_, a mechanism for off-chain microtransactions for Filecoin dApps to be reconciled on-chain at a later time with less overhead than a standard on-chain transaction, and no gas costs. Payment channels are uni-directional and can be funded by adding to their balance. To create a payment channel and deposit fund, a user calls the `PaymentChannelActor`. This actor does not interact directly with the FVM.
+
+### StoragePowerActor
+
+The `StoragePowerActor` is responsible for keeping track of the storage power allocated to each storage miner, and has the ability to create a `StorageMinerActor`. This actor does not interact directly with the FVM.
+
+### VerifiedRegistryActor
+
+The `VerifiedRegistryActor` is responsible for managing Filecoin Plus (Fil+) clients. This actor can add a verified client to the Fil+ program, remove and reclaim expired DataCap allocations and manage claims. This actor does not interact directly with the FVM.
+
+### SystemActor
+
+For more information on `SystemActor`, see the [source code](https://github.com/filecoin-project/specs-actors/blob/master/actors/builtin/system/system\_actor.go).
+
+## Access and invoke built-in actors
+
+Developers can access and invoke built-in actors using the _Protocol API_ or the Zondax _filecoin.solidity_ API.
+
+The Filecoin Solidity project is [currently in beta](https://docs.zondax.ch/fevm/filecoin-solidity#disclaimer-%EF%B8%8F%EF%B8%8F).
+
+* _Protocol API_, maintained by [Protocol Labs](https://protocol.ai/), …
+* _Filecoin.solidity_, maintained by [Zondax](https://docs.zondax.ch/), is a set of libraries that allows Solidity smart contracts to seamlessly call built-in actors methods. **Not all built-in actors and methods are supported** - for a complete list, see the [actors and methods supported](https://docs.zondax.ch/fevm/filecoin-solidity/api/#actors-and-methods-supported). For further information, including information on how to use the package, see the [official documentation](https://docs.zondax.ch/fevm/filecoin-solidity/) and the [GitHub repository](https://github.com/filecoin-project/filecoin-solidity).
+
+For information on how invoke and access built-in actors in your smart contracts, see the [developers guide](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/).
diff --git a/reference/built-in-actors/filecoin.sol.md b/reference/built-in-actors/filecoin.sol.md
new file mode 100644
index 000000000..6d616e42d
--- /dev/null
+++ b/reference/built-in-actors/filecoin.sol.md
@@ -0,0 +1,333 @@
+---
+description: This page covers the built-in actors Filecoin.sol API.
+---
+
+# Filecoin.sol
+
+For conceptual information on built-in actors, including their purposes, how they work and more, see the [conceptual guide](https://docs.filecoin.io/reference/built-in-actors/overview/).
+
+## Prerequisites
+
+Before you can call a built-in actor using the API, you must [import filecoin.solidity using one of the available methods](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#using-filecoinsolidity).
+
+## Call a built-in actor
+
+For available actors and methods see [Available actors and methods](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#available-actors-and-methods).
+
+Once you’ve either imported particular contracts manually or simply installed `filecoin-solidity` using `npm`, create a callable method to access the built-in actor methods the way you normally would in a Solidity smart contract. Working examples of smart contracts that call built-in actor methods are available below.
+
+* [Account](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-account-actor)
+* [DataCap](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-datacap-actor)
+* [Miner](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-miner-actor)
+* [Storage market](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-storage-market-actor)
+* [Storage power](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-storage-power-actor)
+* [Verified registry](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-verified-registry-actor)
+
+For conceptual information on built-in actors, including their purposes, how they work and available types, see the [conceptual guide](https://docs.filecoin.io/reference/built-in-actors/overview/).
+
+### Call the account actor
+
+The following example imports the Account actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-account-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/account.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../types/AccountTypes.sol";
+import "../types/CommonTypes.sol";
+import "../AccountAPI.sol";
+import "../Utils.sol";
+
+contract AccountApiTest {
+ function authenticate_message(CommonTypes.FilActorId target, AccountTypes.AuthenticateMessageParams memory params) public {
+ AccountAPI.authenticateMessage(target, params);
+ }
+
+ function universal_receiver_hook(CommonTypes.FilActorId target, CommonTypes.UniversalReceiverParams memory params) public {
+ Utils.universalReceiverHook(target, params);
+ }
+}
+```
+
+### Call the DataCap actor
+
+The following example imports the DataCap actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-datacap-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/datacap.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../types/DataCapTypes.sol";
+import "../types/CommonTypes.sol";
+import "../cbor/BigIntCbor.sol";
+import "../DataCapAPI.sol";
+import "../Utils.sol";
+
+contract DataCapApiTest {
+ function name() public returns (string memory) {
+ return DataCapAPI.name();
+ }
+
+ function symbol() public returns (string memory) {
+ return DataCapAPI.symbol();
+ }
+
+ function total_supply() public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.totalSupply();
+ }
+
+ function balance(CommonTypes.FilAddress memory addr) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.balance(addr);
+ }
+
+ function allowance(DataCapTypes.GetAllowanceParams memory params) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.allowance(params);
+ }
+
+ function transfer(DataCapTypes.TransferParams memory params) public returns (DataCapTypes.TransferReturn memory) {
+ return DataCapAPI.transfer(params);
+ }
+
+ function transfer_from(DataCapTypes.TransferFromParams memory params) public returns (DataCapTypes.TransferFromReturn memory) {
+ return DataCapAPI.transferFrom(params);
+ }
+
+ function increase_allowance(DataCapTypes.IncreaseAllowanceParams memory params) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.increaseAllowance(params);
+ }
+
+ function decrease_allowance(DataCapTypes.DecreaseAllowanceParams memory params) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.decreaseAllowance(params);
+ }
+
+ function revoke_allowance(CommonTypes.FilAddress memory operator) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.revokeAllowance(operator);
+ }
+
+ function burn(CommonTypes.BigInt memory amount) public returns (CommonTypes.BigInt memory) {
+ return DataCapAPI.burn(amount);
+ }
+
+ function burn_from(DataCapTypes.BurnFromParams memory params) public returns (DataCapTypes.BurnFromReturn memory) {
+ return DataCapAPI.burnFrom(params);
+ }
+
+ function handle_filecoin_method(uint64 method, uint64 codec, bytes calldata params) public pure {
+ Utils.handleFilecoinMethod(method, codec, params);
+ }
+}
+```
+
+### Call the storage market actor
+
+The following example imports the Storage market actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-storage-market-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/market.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../MarketAPI.sol";
+import "../types/MarketTypes.sol";
+
+contract MarketApiTest {
+ function add_balance(CommonTypes.FilAddress memory providerOrClient, uint256 value) public payable {
+ MarketAPI.addBalance(providerOrClient, value);
+ }
+
+ function withdraw_balance(MarketTypes.WithdrawBalanceParams memory params) public returns (CommonTypes.BigInt memory) {
+ return MarketAPI.withdrawBalance(params);
+ }
+
+ function get_balance(CommonTypes.FilAddress memory addr) public returns (MarketTypes.GetBalanceReturn memory) {
+ return MarketAPI.getBalance(addr);
+ }
+
+ function get_deal_data_commitment(uint64 dealID) public returns (MarketTypes.GetDealDataCommitmentReturn memory) {
+ return MarketAPI.getDealDataCommitment(dealID);
+ }
+
+ function get_deal_client(uint64 dealID) public returns (uint64) {
+ return MarketAPI.getDealClient(dealID);
+ }
+
+ function get_deal_provider(uint64 dealID) public returns (uint64) {
+ return MarketAPI.getDealProvider(dealID);
+ }
+
+ function get_deal_label(uint64 dealID) public returns (string memory) {
+ return MarketAPI.getDealLabel(dealID);
+ }
+
+ function get_deal_term(uint64 dealID) public returns (MarketTypes.GetDealTermReturn memory) {
+ return MarketAPI.getDealTerm(dealID);
+ }
+
+ function get_deal_total_price(uint64 dealID) public returns (CommonTypes.BigInt memory) {
+ return MarketAPI.getDealTotalPrice(dealID);
+ }
+
+ function get_deal_client_collateral(uint64 dealID) public returns (CommonTypes.BigInt memory) {
+ return MarketAPI.getDealClientCollateral(dealID);
+ }
+
+ function get_deal_provider_collateral(uint64 dealID) public returns (CommonTypes.BigInt memory) {
+ return MarketAPI.getDealProviderCollateral(dealID);
+ }
+
+ function get_deal_verified(uint64 dealID) public returns (bool) {
+ return MarketAPI.getDealVerified(dealID);
+ }
+
+ function get_deal_activation(uint64 dealID) public returns (MarketTypes.GetDealActivationReturn memory) {
+ return MarketAPI.getDealActivation(dealID);
+ }
+
+ function publish_storage_deals(MarketTypes.PublishStorageDealsParams memory params) public returns (MarketTypes.PublishStorageDealsReturn memory) {
+ return MarketAPI.publishStorageDeals(params);
+ }
+}
+```
+
+### Call the miner actor
+
+The following example imports the Miner actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-miner-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/miner.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../MinerAPI.sol";
+import "../types/MinerTypes.sol";
+
+contract MinerApiTest {
+ function get_owner(CommonTypes.FilActorId target) public returns (MinerTypes.GetOwnerReturn memory) {
+ return MinerAPI.getOwner(target);
+ }
+
+ function change_owner_address(CommonTypes.FilActorId target, CommonTypes.FilAddress memory addr) public {
+ MinerAPI.changeOwnerAddress(target, addr);
+ }
+
+ function is_controlling_address(CommonTypes.FilActorId target, CommonTypes.FilAddress memory addr) public returns (bool) {
+ return MinerAPI.isControllingAddress(target, addr);
+ }
+
+ function get_sector_size(CommonTypes.FilActorId target) public returns (uint64) {
+ return MinerAPI.getSectorSize(target);
+ }
+
+ function get_available_balance(CommonTypes.FilActorId target) public returns (CommonTypes.BigInt memory) {
+ return MinerAPI.getAvailableBalance(target);
+ }
+
+ function get_vesting_funds(CommonTypes.FilActorId target) public returns (MinerTypes.GetVestingFundsReturn memory) {
+ return MinerAPI.getVestingFunds(target);
+ }
+
+ function change_beneficiary(CommonTypes.FilActorId target, MinerTypes.ChangeBeneficiaryParams memory params) public {
+ return MinerAPI.changeBeneficiary(target, params);
+ }
+
+ function get_beneficiary(CommonTypes.FilActorId target) public returns (MinerTypes.GetBeneficiaryReturn memory) {
+ return MinerAPI.getBeneficiary(target);
+ }
+
+ function change_worker_address(CommonTypes.FilActorId target, MinerTypes.ChangeWorkerAddressParams memory params) public {
+ MinerAPI.changeWorkerAddress(target, params);
+ }
+
+ function change_peer_id(CommonTypes.FilActorId target, CommonTypes.FilAddress memory newId) public {
+ MinerAPI.changePeerId(target, newId);
+ }
+
+ function change_multiaddresses(CommonTypes.FilActorId target, MinerTypes.ChangeMultiaddrsParams memory params) public {
+ MinerAPI.changeMultiaddresses(target, params);
+ }
+
+ function repay_debt(CommonTypes.FilActorId target) public {
+ MinerAPI.repayDebt(target);
+ }
+
+ function confirm_change_worker_address(CommonTypes.FilActorId target) public {
+ MinerAPI.confirmChangeWorkerAddress(target);
+ }
+
+ function get_peer_id(CommonTypes.FilActorId target) public returns (CommonTypes.FilAddress memory) {
+ return MinerAPI.getPeerId(target);
+ }
+
+ function get_multiaddresses(CommonTypes.FilActorId target) public returns (MinerTypes.GetMultiaddrsReturn memory) {
+ return MinerAPI.getMultiaddresses(target);
+ }
+
+ function withdraw_balance(CommonTypes.FilActorId target, CommonTypes.BigInt memory amount) public returns (CommonTypes.BigInt memory) {
+ return MinerAPI.withdrawBalance(target, amount);
+ }
+}
+```
+
+### Call the storage power actor
+
+The following example imports the Storage power actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-storage-power-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/power.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../types/PowerTypes.sol";
+import "../types/CommonTypes.sol";
+import "../PowerAPI.sol";
+
+contract PowerApiTest {
+ function create_miner(PowerTypes.CreateMinerParams memory params, uint256 value) public payable returns (PowerTypes.CreateMinerReturn memory) {
+ return PowerAPI.createMiner(params, value);
+ }
+
+ function miner_count() public returns (uint64) {
+ return PowerAPI.minerCount();
+ }
+
+ function miner_consensus_count() public returns (int64) {
+ return PowerAPI.minerConsensusCount();
+ }
+
+ function network_raw_power() public returns (CommonTypes.BigInt memory) {
+ return PowerAPI.networkRawPower();
+ }
+
+ function miner_raw_power(uint64 minerID) public returns (PowerTypes.MinerRawPowerReturn memory) {
+ return PowerAPI.minerRawPower(minerID);
+ }
+}
+```
+
+### Call the verified registry actor
+
+The following example imports the verified registry actor library and creates a callable method for each of the [available actor methods](https://docs.filecoin.io/reference/built-in-actors/filecoin-sol/#call-the-verified-registry-actor). For the full code, see [the GitHub repository](https://github.com/filecoin-project/filecoin-solidity/blob/master/contracts/v0.8/tests/verifreg.test.sol).
+
+```solidity
+pragma solidity ^0.8.17;
+
+import "../types/VerifRegTypes.sol";
+import "../types/CommonTypes.sol";
+import "../VerifRegAPI.sol";
+
+contract VerifRegApiTest {
+ function get_claims(VerifRegTypes.GetClaimsParams memory params) public returns (VerifRegTypes.GetClaimsReturn memory) {
+ return VerifRegAPI.getClaims(params);
+ }
+
+ function add_verified_client(VerifRegTypes.AddVerifiedClientParams memory params) public {
+ VerifRegAPI.addVerifiedClient(params);
+ }
+
+ function remove_expired_allocations(
+ VerifRegTypes.RemoveExpiredAllocationsParams memory params
+ ) public returns (VerifRegTypes.RemoveExpiredAllocationsReturn memory) {
+ return VerifRegAPI.removeExpiredAllocations(params);
+ }
+
+ function extend_claim_terms(VerifRegTypes.ExtendClaimTermsParams memory params) public returns (CommonTypes.BatchReturn memory) {
+ return VerifRegAPI.extendClaimTerms(params);
+ }
+
+ function remove_expired_claims(VerifRegTypes.RemoveExpiredClaimsParams memory params) public returns (VerifRegTypes.RemoveExpiredClaimsReturn memory) {
+ return VerifRegAPI.removeExpiredClaims(params);
+ }
+}
+```
diff --git a/reference/built-in-actors/protocol-api.md b/reference/built-in-actors/protocol-api.md
new file mode 100644
index 000000000..28aca1f6b
--- /dev/null
+++ b/reference/built-in-actors/protocol-api.md
@@ -0,0 +1,1308 @@
+---
+description: This page covers the Built-in actors Protocol API.
+---
+
+# Protocol API
+
+The protocol level built-in actors API is split into the following sections:
+
+* [Account actor](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#account-actor)
+* [Datacap](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#datacap)
+* [Miner](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#miner)
+* [Multisig](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#multisig)
+* [Storage market actor](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#storage-market-actor)
+* [Storage power actor](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#storage-power-actor)
+* [Verified registry actor](https://docs.filecoin.io/reference/built-in-actors/protocol-api/#verified-registry-actor)
+
+## Account actor
+
+The account actor is responsible for user accounts. If you want to call these methods in your smart contracts, you need to specify the method number of the method you want to invoke. Please refer to each method for its method number.
+
+### AuthenticateMessage
+
+```go
+func AuthenticateMessage(params AuthenticateMessage) EmptyValue ()
+```
+
+Authenticates whether the provided signature is valid for the provided message.
+
+`uint` AuthenticateMessageMethodNum = 2643134072.
+
+Parameters:
+
+* `struct` AuthenticateMessageParams
+ * `bytes` AuthenticateMessageParamsSignature - it should be a raw byte of signature, NOT a serialized signature object with a signatureType.
+ * `bytes` Message - The message which is signed by the corresponding account address.
+
+Results:
+
+* `struct` EmptyValue.
+
+### UniversalReceiverHook
+
+```go
+func UniversalReceiverHook(params RawBytes) EmptyValue ()
+```
+
+Whenever the account receives transfers, this method will be invoked.
+
+`uint` UniversalReceiverHookMethodNum = 3726118371.
+
+Parameters:
+
+* `bytes[]` RawBytes - passes the bytes through how it is received.
+
+Results:
+
+* `struct` EmptyValue - always success.
+
+## Datacap
+
+The DataCap actor is responsible for DataCap token management. The ActorCode for the DataCap actor is `hex"0007"`, which is used to call the DataCap actor APIs. You also need to specify the method number of the method you want to invoke. Refer to each method for its method number.
+
+### Name
+
+```go
+func Name() String {}
+```
+
+Return the name of the DataCap token, which is ‘DataCap’.
+
+`uint` NameMethodNum: 48890204.
+
+Parameters:
+
+* null
+
+Results:
+
+* `String` : DataCap
+
+### Symbol
+
+```go
+func Symbol() String {}
+```
+
+Return the symbol of the DataCap token, which is ‘DCAP’.
+
+`uint` SymbolMethodNum: 2061153854.
+
+Parameters:
+
+* null
+
+Results:
+
+* `String` : DCAP
+
+### TotalSupply
+
+```go
+func TotalSupply() TokenAmount {}
+```
+
+Return the total supply of the DataCap token.
+
+`uint` TotalSupplyMethodNum: 114981429.
+
+Parameters:
+
+* null
+
+Results:
+
+* `int256` TokenAmount - Total DataCap token supply.
+
+### Balance
+
+```go
+func Balance(params Address) TokenAmount {}
+```
+
+Return the DataCap token balance for the wallet address.
+
+`uint` BalanceOfMethodNum: 3261979605.
+
+Parameters:
+
+* `bytes` Address - the wallet address.
+
+Results:
+
+* `int256` TokenAmount - the DataCap token balance for the specified wallet address.
+
+### Transfer
+
+```go
+func Transfer(params TransferParams) TransferReturn {}
+```
+
+Transfers DataCap tokens from the caller address to the specified `To` address.
+
+`uint` TransferMethodNum = 80475954;
+
+Parameters:
+
+* `struct` TransferParams
+ * `bytes` To - the address to receive DataCap token.
+ * `int256` Amount - A non-negative amount to transfer.
+ * `bytes[]` OperatorData - Arbitrary data to pass on via the receiver hook.
+
+Results:
+
+* `struct` TransferReturn
+ * `int256` FromBalance - the balance of from\_address.
+ * `int256` ToBalance - the balance of to\_address.
+ * `bytes` RecipientData: data returned from receive hook.
+
+### TransferFrom
+
+```go
+func TransferFrom(params TransferFromParams) TransferFromReturn {}
+```
+
+Transfers DataCap tokens from the from\_address to the to\_address.
+
+`uint` TransferFromMethodNum = 3621052141.
+
+Params:
+
+* `bytes` TransferFromParams
+ * `bytes` From - the address to send DataCap Token.
+ * `bytes` To - the address to receive DataCap Token.
+ * `int256` Amount - A non-negative amount to transfer.
+ * `bytes` OperatorData: Arbitrary data to pass on via the receiver hook.
+
+Results:
+
+* `struct` TransferFromReturn
+ * `int256` FromBalance - the balance of from\_address.
+ * `int256` ToBalance - the balance of to\_address.
+ * `int256` Allowance - the remaining allowance of owner address.
+ * `bytes` RecipientData - data returned from receive hook.
+
+### IncreaseAllowance
+
+```go
+func IncreaseAllowance(params IncreaseAllowanceParams) TokenAmount {}
+```
+
+Increase the DataCap token allowance that an operator can control by the requested amount.
+
+`uint` IncreaseAllowanceMethodNum = 1777121560.
+
+Params:
+
+* `struct` IncreaseAllowanceParams
+ * `bytes` Operator - the wallet address of the operator.
+ * `int256` increaseAmount - increase DataCap token allowance for the operator address.
+
+Results:
+
+* `int256` TokenAmount - the new DataCap allowance of the operator address.
+
+### DecreaseAllowance
+
+```go
+func DecreaseAllowance(params DecreaseAllowanceParams) TokenAmount {}
+```
+
+Decrease the DataCap token allowance that an operator controls of the owner’s balance by the requested amount.
+
+`uint` DecreaseAllowanceMethodNum = 1529376545;
+
+Params:
+
+* `struct` DecreaseAllowanceParams
+ * `bytes` Operator - the wallet address of the operator.
+ * `int256` DecreaseAmount - the amount by which to decrease the DataCap token allowance of the operator address.
+
+Results:
+
+* `int256` TokenAmount - the new DataCap allowance of the operator address.
+
+### RevokeAllowance
+
+```go
+func RevokeAllowance(params RevokeAllowanceParams) TokenAmount {}
+```
+
+Revoke the DataCap token allowance from the operator and set the operator’s allowance on behalf of the owner/caller address to 0.
+
+`uint` RevokeAllowanceMethodNum = 2765635761.
+
+Params:
+
+* `struct` RevokeAllowanceParams
+ * `bytes` Operator - the wallet address of the operator.
+
+Results:
+
+* `int256` TokenAmount - the old Allowance amount of the operator address.
+
+### Burn
+
+```go
+func Burn(params BurnParams) TokenAmount {}
+```
+
+Burn an amount of DataCap token from the owner/caller address, decreasing total token supply.
+
+`uint` BurnMethodNum = 1434719642.
+
+Params:
+
+* `struct` BurnParams
+ * `int256` Amount - the amount the DataCap token to be burned.
+
+Results:
+
+* `int256` TokenAmount - the updated DataCap token balance of the owner/caller address.
+
+### BurnFrom
+
+```go
+func BurnFrom(params BurnFromParams) BurnFromReturn {}
+```
+
+Burn an amount of DataCap token from the specified address (owner address), decrease the allowance of operator/caller, and decrease total token supply.
+
+`uint` BurnFromMethodNum = 2979674018.
+
+Params:
+
+* `struct` BurnFromParams
+ * `bytes` Owner - the wallet address of the owner.
+ * `int256` Amount - the amount of DataCap token to be burned.
+
+Results:
+
+* `struct` BurnFromReturn
+ * `bytes` Owner - the wallet address of the owner.
+ * `int256` Amount - the new balance of owner wallet.
+
+### Allowance
+
+```go
+func Allowance(params GetAllowanceParams) TokenAmount {}
+```
+
+Return the allowance between owner and operator address.
+
+`uint` AllowanceMethodNum = 4205072950;
+
+Params:
+
+* `struct` GetAllowanceParams
+ * `bytes` Owner : the wallet address of the owner.
+ * `bytes` Operator : the wallet address of the operator.
+
+Results:
+
+* `int256` TokenAmount - the allowance that an operator can control of an owner’s allowance.
+
+## Miner
+
+The miner built-in actor is responsible for storage mining operations and collecting proofs. To interact with a specific storage provider, you must use their miner address to invoke the methods in the built-in miner actor. You also need to specify the method number for the method you want to invoke. Please refer to each method for its method number.
+
+### GetPeerID
+
+```go
+func GetPeerID() GetPeerIDReturn {}
+```
+
+Return the Peer ID for the caller/miner address.
+
+`uint` GetPeerIDMethodNum = 2812875329.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetPeerIDReturn
+ * `bytes` PeerID - the peer ID for the specified storage provider/miner.
+
+### ChangePeerID
+
+```go
+func ChangePeerID(params ChangePeerIDParams) EmptyValue {}
+```
+
+Change the peer ID for the caller/miner address.
+
+`uint` ChangePeerIDMethodNum = 1236548004.
+
+Params:
+
+* `struct` ChangePeerIDParams
+ * `bytes` NewID - the new peer ID.
+
+Results:
+
+* `struct` EmptyValue
+
+### GetMultiaddrs
+
+```go
+func GetMultiaddrs() GetMultiAddrsReturn {}
+```
+
+Returns the list of multiaddresses for this caller/miner address.
+
+`uint` GetMultiaddrsMethodNum = 1332909407.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetMultiAddrsReturn
+ * `byte[]` MultiAddrs - the list of multiaddresses for this miner.
+
+### ChangeMultiaddrs
+
+```go
+func ChangeMultiaddrs(params ChangeMultiaddrsParams) EmptyValue {}
+```
+
+Change the multiaddresses for this caller/miner address.
+
+`uint` ChangeMultiaddrsMethodNum = 1063480576.
+
+Params:
+
+* `struct` ChangeMultiaddrsParams
+ * `byte[]` NewMultiaddrs - the new multiaddresses.
+
+Results:
+
+* `struct` EmptyValue
+
+### ChangeWorkerAddress
+
+```go
+func ChangeWorkerAddress(params ChangeWorkerAddressParams) EmptyValue {}
+```
+
+Change the worker address for the caller/miner address, and overwrite the existing addresses with the new control addresses passed in the params.
+
+`uint` ChangeWorkerAddressMethodNum = 3302309124.
+
+Params:
+
+* `struct` ChangeWorkerAddressParams
+ * `bytes` NewWorker - the new worker address.
+ * `byte[]` NewControlAddrs - the new controller addresses.
+
+Results:
+
+* `struct` EmptyValue
+
+### ConfirmChangeWorkerAddress
+
+```go
+func ConfirmChangeWorkerAddress() EmptyValue {}
+```
+
+Confirm the worker address has been changed for the caller/miner address.
+
+`uint` ConfirmChangeWorkerAddressMethodNum = 2354970453.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` EmptyValue
+
+### RepayDebt
+
+```go
+func RepayDebt() EmptyValue {}
+```
+
+Repay as much fee debt as possible for the caller/miner address.
+
+`uint` RepayDebtMethodNum = 3665352697.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` EmptyValue
+
+### GetOwner
+
+```go
+func GetOwner() GetOwnerReturn {}
+```
+
+Return the owner address of the caller/miner address.
+
+`uint` GetOwnerMethodNum = 3275365574.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetOwnerReturn
+ * `byte` Owner - owner address.
+
+### ChangeOwnerAddress
+
+```go
+func ChangeOwnerAddress(bytes address) {}
+```
+
+Proposes or confirms a change of owner address.
+
+`uint` ChangeOwnerAddressMethodNum = 1010589339.
+
+Params:
+
+* `bytes` Address - the new owner address.
+
+Results:
+
+* `struct` EmptyValue
+
+### GetBeneficiary
+
+```go
+func GetBeneficiary() GetBeneficiaryReturn {}
+```
+
+Return the currently active and proposed beneficiary information.
+
+`uint` GetBeneficiaryMethodNum = 4158972569.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetBeneficiaryReturn
+ * `struct` ActiveBeneficiary - current active beneficiary.
+ * `byte` Beneficiary - the address of the beneficiary.
+ * `struct` BeneficiaryTerm
+ * `int256` Quota - the quota token amount.
+ * `int256` UsedQuota - the used quota token amount.
+ * `uint64` Expiration - the epoch that the quota will be expired.
+ * `struct` PendingBeneficiaryChange - the proposed and pending beneficiary.
+ * `bytes` newBeneficiary - the new beneficiary address.
+ * `int256` NewQuota - the new quota token amount.
+ * `uint64` NewExpiration - the epoch that the new quota will be expired.
+ * `bool` ApprovedByBeneficiary - if this proposal is approved by the beneficiary or not.
+ * `bool` ApprovedByNominee - if this proposal is approved by the nominee or not.
+
+### ChangeBeneficiary
+
+```go
+func ChangeBeneficiary(params ChangeBeneficiaryParams) EmptyValue {}
+```
+
+Propose or confirm a change of beneficiary information.
+
+`uint` ChangeBeneficiaryMethodNum = 1570634796.
+
+Params:
+
+* `struct` ChangeBeneficiaryParams
+ * `bytes` newBeneficiary - the new beneficiary address.
+ * `int256` NewQuota - the new quota token amount.
+ * `uint64` NewExpiration - the epoch that the new quota will be expired.
+
+Results:
+
+* `struct` EmptyValue
+
+### IsControllingAddress
+
+```go
+func IsControllingAddress(params IsControllingAddressParams) IsControllingAddressReturn {}
+```
+
+Returns whether the provided address is the Owner, the Worker, or any of the control addresses.
+
+`uint` IsControllingAddressMethodNum = 348244887.
+
+Params:
+
+* `bytes` IsControllingAddressParams - the address to be verified.
+
+Results:
+
+* `bool` IsControllingAddressReturn - if the specified address is the control address.
+
+### GetSectorSize
+
+```go
+func GetSectorSize() GetSectorSizeReturn {}
+```
+
+Returns the miner’s sector size.
+
+`uint` GetSectorSizeMethodNum = 3858292296;
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetSectorSizeReturn
+ * `uint64` SectorSize - the sector size of this miner.
+
+### GetAvailableBalance
+
+```go
+func GetAvailableBalance() GetAvailableBalanceReturn {}
+```
+
+Returns the available balance of this miner.
+
+`uint` GetAvailableBalanceMethodNum = 4026106874.
+
+Params:
+
+* null
+
+Results:
+
+* `int256` GetAvailableBalanceReturn - the available token balance amount.
+
+### WithdrawBalance
+
+```go
+func WithdrawBalance(params WithdrawBalanceParams) WithdrawBalanceReturn {}
+```
+
+Withdraw the token balance for this miner.
+
+Params:
+
+* `struct` WithdrawBalanceParams
+ * `int256` AmountRequested - withdraw token amount.
+
+Results:
+
+* `int256` WithdrawBalanceReturn - the token amount withdrawn.
+
+### GetVestingFunds
+
+```go
+func GetVestingFunds() GetVestingFundsReturn {}
+```
+
+Return the funds vesting in this miner as a list of (vesting\_epoch, vesting\_amount) tuples.
+
+`uint` GetVestingFundsMethodNum = 1726876304.
+
+Params:
+
+* null
+
+Results:
+
+* `struct` GetVestingFundsReturn
+ * `struct VestingFunds[]` Funds
+ * `int64` Epoch - the epoch of funds vested.
+ * `int256` Amount - the number of funds vested.
+
+## Multisig
+
+Multisig built-in actor is responsible for dealing with operations involving the Filecoin wallet. To interact with a specific multi-signature wallet address, you need to use this wallet address to invoke the methods in the built-in multisig actor. You also need to specify the method number of which method you want to invoke. Please refer to each method for its method number.
+
+### Propose
+
+```go
+func Propose(params ProposeParams) ProposeReturn {...}
+```
+
+Propose a token transfer transaction for signers to approve. The proposer automatically approves this transaction.
+
+`uint` ProposeMethodNum = 1696838335.
+
+Params:
+
+* `struct` ProposeParams
+ * `bytes` ToAddress - the address to receive the token.
+ * `int256` Value - the token amount to be transferred.
+ * `uint64` Method - the method number to invoke on the recipient if the proposal is approved.
+ * `bytes[]` Params - the serialized parameters to pass to the invoked method.
+
+Results:
+
+* `struct` ProposeReturn
+ * `int64` TxnID - the ID of the proposed transaction.
+ * `bool` Applied - whether the transaction was applied as proposed.
+ * `uint32` Code - the exit code of the transaction. If `Applied` is `false` this field can be ignored.
+ * `bytes` Ret - the return value of the transaction. If `Applied` is `false` this field can be ignored.
+
+### Approve
+
+```go
+func Approve(params TxnIDParams) ApproveReturn {}
+```
+
+Other signers of the multi-signature address can use this method to approve the proposed messages.
+
+`uint` ApproveMethodNum = 1289044053.
+
+Params:
+
+* `struct` TxnIDParams
+ * `int64` ID - the signed message ID.
+ * `bytes` ProposalHash - Hash of proposal to ensure an operation can only apply to a specific proposal.
+
+Results:
+
+* `struct` ApproveReturn
+ * `bool` Applied - whether the transaction was applied as proposed.
+ * `uint32` Code - the exit code of the transaction. If `Applied` is `false` this field can be ignored.
+ * `bytes` Ret - the return value of the transaction. If `Applied` is `false` this field can be ignored.
+
+### Cancel
+
+```go
+func Cancel(param TxnIDParams) EmptyValue {}
+```
+
+Allows a multi-signature wallet signer to cancel a pending multi-signature transaction.
+
+`uint` CancelMethodNum = 3365893656.
+
+Params:
+
+* `struct` TxnIDParams
+ * `int64` ID - the signed message ID.
+ * `bytes` ProposalHash - Hash of proposal to ensure an operation can only apply to a specific proposal.
+
+Results:
+
+* `struct` EmptyValue.
+
+### AddSigner
+
+```go
+func AddSigner(params AddSignerParams) EmptyValue {}
+```
+
+Add a signer to the multi-signature wallet.
+
+`uint` AddSignerMethodNum = 3028530033.
+
+Params:
+
+* `struct` AddSignerParams
+ * `bytes` Signer - the new signer address.
+ * `bool` Increase - increase threshold or not.
+
+Results:
+
+* `struct` EmptyValue.
+
+### RemoveSigner
+
+```go
+func RemoveSigner(params RemoveSignerParams) EmptyValue {}
+```
+
+Remove a signer from the multi-signature wallet.
+
+`uint` RemoveSignerMethodNum = 21182899.
+
+Params:
+
+* `struct` RemoveSignerParams
+ * `bytes` Signer - the signer address to be removed.
+ * `bool` Decrease - decrease threshold or not. Only able to decrease when the threshold is larger than 2.
+
+Results:
+
+* `struct` EmptyValue.
+
+### SwapSigner
+
+```go
+func SwapSigner(params SwapSignerParams) EmptyValue {}
+```
+
+Swap signers for the multi-signature wallet.
+
+`uint` SwapSignerMethodNum = 3968117037;
+
+Params:
+
+* `struct` SwapSignerParams
+ * `bytes` From - the signer address to be removed from the multi-signature wallet.
+ * `bytes` To - the signer address to be added to the multi-signature wallet.
+
+Results:
+
+* `struct` EmptyValue.
+
+### ChangeNumApprovalsThreshold
+
+```go
+func ChangeNumApprovalsThreshold(params ChangeNumApprovalsThresholdParams) EmptyValue {}
+```
+
+Change the threshold number required for the approvals for the multi-signature wallet.
+
+`uint` ChangeNumApprovalsThresholdMethodNum = 3375931653.
+
+Params:
+
+* `struct` ChangeNumApprovalsThresholdParams
+ * `uint64` NewThreshold - the new threshold number.
+
+Results:
+
+* `struct` EmptyValue.
+
+### LockBalance
+
+```go
+func LockBalance(params LockBalanceParams) EmptyValue {}
+```
+
+Lock a number of tokens in a multi-signature wallet from the `start` epoch to the `unlock` epoch.
+
+`uint` LockBalanceMethodNum = 1999470977.
+
+Params:
+
+* `struct` LockBalanceParams
+ * `int64` StartEpoch - the epoch to start locking the balance.
+ * `int64` UnlockDuration - the epoch to unlock the balance.
+ * `int256` Amount - the amount of token to be locked.
+
+Results:
+
+* `struct` EmptyValue.
+
+## Storage market actor
+
+Storage market actor is responsible for managing storage and retrieval deals. The ActorCode for storage market actor is `hex"0005"` which will be used to call this actor. You also need to specify the method number of the method you want to invoke. Please refer to each method for its method number.
+
+### AddBalance
+
+```go
+func AddBalance(address Address) EmptyValue {}
+```
+
+Deposit the received FIL token, which is received along with this message, into the balance held in the escrow address of the provider or client address.
+
+`uint` AddBalanceMethodNum = 822473126.
+
+Params:
+
+* `bytes` Address - the address of the provider or client.
+
+Results:
+
+* `struct` EmptyValue.
+
+### GetBalance
+
+```go
+func GetBalance(address Address) GetBalanceReturn {}
+```
+
+Return the escrow balance and locked amount for an address.
+
+`uint` GetBalanceMethodNum = 726108461.
+
+Params:
+
+* `bytes` address - the wallet address to request balance.
+
+Results:
+
+* `struct` GetBalanceReturn
+ * `int256` Balance - the escrow balance for this address.
+ * `int256` Locked - the escrow-locked amount for this address.
+
+### WithdrawBalance
+
+```go
+func WithdrawBalance(params WithdrawBalanceParams) WithdrawBalanceReturn {}
+```
+
+Withdraw the specified amount from the balance held in escrow.
+
+`uint` WithdrawBalanceMethodNum = 2280458852.
+
+Params:
+
+* `struct` WithdrawBalanceParams
+ * `bytes` ProviderOrClientAddress - the address of the provider or client.
+ * `int256` TokenAmount - the token amount to withdraw.
+
+Results:
+
+* `struct` WithdrawBalanceReturn
+ * `int256` AmountWithdraw - the token amount withdrawn.
+
+### PublishStorageDeals
+
+```go
+func PublishStorageDeals(params PublishStorageDealsParams) PublishStorageDealsReturn {}
+```
+
+Publish a new set of storage deals that are not yet included in a sector.
+
+`uint` PublishStorageDealsMethodNum = 2236929350.
+
+Params:
+
+* `struct` PublishStorageDealsParams
+ * `struct ClientDealProposal[]` Deals - list of deal proposals signed by a client
+ * `struct DealProposal` Proposal
+ * `bytes` PieceCID.
+ * `uint64` PieceSize - the size of the piece.
+ * `bool` VerifiedDeal - if the deal is verified or not.
+ * `bytes` Client - the address of the storage client.
+ * `bytes` Provider - the address of the storage provider.
+ * `string` Label - any label that the client chooses for the deal.
+ * `int64` StartEpoch - the chain epoch to start the deal.
+ * `int64` EndEpoch - the chain epoch to end the deal.
+ * `int256` StoragePricePerEpoch - the token amount to pay to the provider per epoch.
+ * `int256` ProviderCollateral - the token amount as collateral paid by the provider.
+ * `int256` ClientCollateral - the token amount as collateral paid by the client.
+ * `bytes` ClientSignature - the signature signed by the client.
+
+Results:
+
+* `struct` PublishStorageDealsReturn
+ * `uint64[]` IDs - returned storage deal IDs.
+ * `bytes` ValidDeals - represent all the valid deals.
+
+### GetDealDataCommitment
+
+```go
+func GetDealDataCommitment(params GetDealDataCommitmentParams) GetDealDataCommitmentReturn {}
+```
+
+Return the data commitment and size of a deal proposal.
+
+`uint` GetDealDataCommitmentMethodNum = 1157985802.
+
+Params:
+
+* `uint64` GetDealDataCommitmentParams - Deal ID.
+
+Results:
+
+* `struct` GetDealDataCommitmentReturn
+ * `bytes` Data - the data commitment of this deal.
+ * `uint64` Size - the size of this deal.
+
+### GetDealClient
+
+```go
+func GetDealClient(params GetDealClientParams) GetDealClientReturn {}
+```
+
+Return the client of the deal proposal.
+
+`uint` GetDealClientMethodNum = 128053329.
+
+Params:
+
+* `uint64` GetDealClientParams - Deal ID.
+
+Results:
+
+* `bytes` GetDealClientReturn - the wallet address of the client.
+
+### GetDealProvider
+
+```go
+func GetDealProvider(params GetDealProviderParams) GetDealProviderReturn {}
+```
+
+Return the provider of a deal proposal.
+
+`uint` GetDealProviderMethodNum = 935081690.
+
+Params:
+
+* `uint64` GetDealProviderParams - Deal ID.
+
+Results:
+
+* `bytes` GetDealProviderReturn - the wallet address of the provider.
+
+### GetDealLabel
+
+```go
+func GetDealLabel(params GetDealLabelParams) GetDealLabelReturn {}
+```
+
+Return the label of a deal proposal.
+
+`uint` GetDealLabelMethodNum = 46363526.
+
+Params:
+
+* `uint64` GetDealLabelParams - Deal ID.
+
+Results:
+
+* `string` GetDealLabelReturn - the label of this deal.
+
+### GetDealTerm
+
+```go
+func GetDealTerm(params GetDealTermParams) GetDealTermReturn {}
+```
+
+Return the start epoch and duration(in epochs) of a deal proposal.
+
+`uint` GetDealTermMethodNum = 163777312.
+
+Params:
+
+* `uint64` GetDealTermParams - Deal ID.
+
+Results:
+
+* `struct` GetDealTermReturn
+ * `int64` Start - the chain epoch to start the deal.
+ * `int64` End - the chain epoch to end the deal.
+
+### GetDealTotalPrice
+
+```go
+func GetDealTotalPrice(params GetDealTotalPriceParams) GetDealTotalPriceReturn {}
+```
+
+Return the total price that will be paid from the client to the provider for this deal.
+
+`uint` GetDealTotalPriceMethodNum = 4287162428.
+
+Params:
+
+* `uint64` GetDealTotalPriceParams - Deal ID.
+
+Results:
+
+* `int256` GetDealTotalPriceReturn - the token amount that will be paid by the client to the provider.
+
+### GetDealClientCollateral
+
+```go
+func GetDealClientCollateral(params GetDealClientCollateralParams) GetDealClientCollateralReturn {}
+```
+
+Return the client collateral requirement for a deal proposal.
+
+`uint` GetDealClientCollateralMethodNum = 200567895.
+
+Params:
+
+* `uint64` GetDealClientCollateralParams - Deal ID.
+
+Results:
+
+* `int256` GetDealClientCollateralReturn - the token amount as collateral paid by the client.
+
+### GetDealProviderCollateral
+
+```go
+func GetDealProviderCollateral(params GetDealProviderCollateralParams) GetDealProviderCollateralReturn {}
+```
+
+Return the provided collateral requirement for a deal proposal.
+
+`uint` GetDealProviderCollateralMethodNum = 2986712137.
+
+Params:
+
+* `uint64` GetDealProviderCollateralParams - Deal ID.
+
+Results:
+
+* `int256` GetDealProviderCollateralReturn - the token amount as collateral paid by the provider.
+
+### GetDealVerified
+
+```go
+func GetDealVerified(params GetDealVerifiedParams) GetDealVerifiedReturn {}
+```
+
+Return the verified flag for a deal proposal.
+
+`uint` GetDealVerifiedMethodNum = 2627389465.
+
+Params:
+
+* `uint64` GetDealVerifiedParams - Deal ID.
+
+Results:
+
+* `bool` GetDealVerifiedReturn - if the deal is verified or not.
+
+### GetDealActivation
+
+```go
+func GetDealActivation(params GetDealActivationParams) GetDealActivationReturn {}
+```
+
+Return the activation state for a deal.
+
+`uint` GetDealActivationMethodNum = 2567238399.
+
+Params:
+
+* `uint64` GetDealActivationParams - Deal ID.
+
+Results:
+
+* `struct` GetDealActivationReturn
+ * `int64` Activated - Epoch at which the deal was activated, or -1.
+ * `int64` Terminated - Epoch at which the deal was terminated abnormally, or -1.
+
+## Storage power actor
+
+Storage power actor is responsible for keeping track of the storage power allocated at each storage miner. The ActorCode for the built-in storage power actor is `hex"0004"` which will be used to call methods in the storage power actor. You also need to specify the method number for the method you want to invoke. Please refer to each method for its method number.
+
+### CreateMiner
+
+```go
+func CreateMiner(params CreateMinerParams) CreateMinerReturn {}
+```
+
+Create a new miner for the owner address and worker address.
+
+`uint` CreateMinerMethodNum = 1173380165.
+
+Params:
+
+* `struct` CreateMinerParams
+ * `bytes` Owner - the address of the owner.
+ * `bytes` Worker - the address of the worker.
+ * `RegisteredPoStProof` WindowPoStProofType - the type of RegisteredPoStProof.
+ * `bytes` Peer - peerID.
+ * `bytes[]` Multiaddrs - the multi-address which is used to control the newly created miner.
+
+Results:
+
+* `struct` CreateMinerReturn
+ * `bytes` IDAddress - The canonical ID-based address for the actor.
+ * `bytes` RobustAddress - A more expensive but re-org-safe address for the newly created actor.
+
+### NetworkRawPower
+
+```go
+func NetworkRawPower() NetworkRawPowerReturn {}
+```
+
+Return the total raw power of the network.
+
+`uint` NetworkRawPowerMethodNum = 931722534.
+
+Params:
+
+* null
+
+Results:
+
+* `int256` NetworkRawPowerReturn - the raw storage power of the whole network.
+
+### MinerRawPower
+
+```go
+func MinerRawPower(params MinerRawPowerParams) MinerRawPowerReturn {}
+```
+
+Return the raw power claimed by the specified miner and whether the miner has more than the minimum amount of active storage.
+
+`uint` MinerRawPowerMethodNum = 3753401894.
+
+Params:
+
+* `struct` MinerRawPowerParams
+ * `uint64` Miner - Miner ID
+
+Results:
+
+* `struct` MinerRawPowerReturn
+ * `int256` RawBytePower - the raw power of the miner.
+ * `bool` MeetsConsensusMinimum - if the miner power meets the minimum for consensus.
+
+### MinerCount
+
+```go
+func MinerCount() MinerCountReturn {}
+```
+
+Returns the total number of miners created, regardless of whether or not they have any pledged storage.
+
+`uint` MinerRawPowerMethodNum = 3753401894.
+
+Params:
+
+* null
+
+Results:
+
+* `uint64` MinerCountReturn - the count of the miners that the caller address has.
+
+### MinerConsensusCount
+
+```go
+func MinerConsensusCount() MinerConsensusCountReturn {}
+```
+
+Returns the total number of miners that have more than the minimum amount of active storage.
+
+`uint` MinerConsensusCountMethodNum = 196739875.
+
+Params:
+
+* null
+
+Results:
+
+* `uint64` MinerConsensusCountReturn - the count of the miners that meet the consensus minimum that the caller address has.
+
+## Verified registry actor
+
+Verified registry actor is responsible for managing verified clients. The ActorCode for the verified registry built-in actor is `hex"0006"` which will be used to call the exported methods in the verified registry built-in actor. You need to specify the method number for the method you want to invoke. Please refer to each method for its method number.
+
+### AddVerifiedClient
+
+```go
+func AddVerifiedClient(params AddVerifiedClientParams) EmptyValue {}
+```
+
+To add a verified Client address to Filecoin Plus program.
+
+`uint` constant AddVerifiedClientMethodNum = 3916220144.
+
+Params:
+
+* `struct` AddVerifiedClientParams
+ * `bytes` Address - the verified client address
+ * `int256` Allowance - approved DataCap for this verified client
+
+Results:
+
+* `struct` EmptyValue.
+
+### RemoveExpiredAllocations
+
+```go
+func RemoveExpiredAllocations(params RemoveExpiredAllocationsParams) RemoveExpiredAllocationsReturn {}
+```
+
+Remove the expired DataCap allocations and reclaim those DataCap tokens back to the client. If the allocation amount is not specified, all expired DataCap allocations will be removed.
+
+`uint` RemoveExpiredAllocationsMethodNum = 2873373899.
+
+Params:
+
+* `struct` RemoveExpiredAllocationsParams
+ * `uint64` Client - the client address to remove the expired tokens from.
+ * `uint64[]` AllocationIDs - List of allocation IDs to attempt to remove. If empty, this method will remove all eligible expired tokens.
+
+Results:
+
+* `struct` RemoveExpiredAllocationsReturn
+ * `uint64[]` Considered - Allocation IDs are either specified by the caller or discovered to be expired.
+ * `BatchReturn` Results - results for each processed allocation.
+ * `int256` DataCapRecovered - The amount of DataCap token reclaimed for the client.
+
+### GetClaims
+
+```go
+func GetClaims(params GetClaimsParams) GetClaimsReturn {}
+```
+
+Return a list of claims corresponding to the requested claim ID for a specific provider.
+
+`uint` GetClaimsMethodNum = 2199871187.
+
+Params:
+
+* `struct` GetClaimsParams
+ * `uint64` Provider - the provider address.
+ * `uint64[]` ClaimIDs - A list of Claim IDs for a specific provider.
+
+Results:
+
+* `struct` GetClaimsReturn
+ * `struct` BatchReturn
+ * `uint32` SuccessCount - total successes in the batch.
+ * `struct` FailCode\[] {`uint32` idx, `uint32` code} - list of failure code and index for all failures in batch.
+ * `struct Claim[]` Claims - list of Claims returned.
+ * `uint64` Provider - The provider that is storing the data.
+ * `uint64` Client - The client that originally allocated the DataCap.
+ * `bytes` Data - Identifier for the data committed.
+ * `uint64` Size - The size of the data.
+ * `int64` TermMin - The minimum period after the term starts, during which the provider must commit to storing data.
+ * `int64` TermMax - The maximum period after the term starts for which the provider can earn Quality Adjusted power for the data.
+ * `int64` TermStart - the epoch at which the piece was committed.
+ * `uint64` Sector - ID of the provider’s sector in which the data is committed.
+
+### ExtendClaimTerms
+
+```go
+func ExtendClaimTerms(params ExtendClaimTermsParams) ExtendClaimTermsReturn {}
+```
+
+Extends the maximum term of some claims up to the largest value they could have been originally allocated. This method can only be called by the claims’ client.
+
+`uint` ExtendClaimTermsMethodNum = 1752273514.
+
+Params:
+
+* `struct` ExtendClaimTermsParams
+ * `struct ClaimTerm[]` Terms
+ * `uint64` Provider - The provider address which stores the data.
+ * `uint64` ClaimID - Claim ID.
+ * `int64` TermMax - The max chain epoch to extend.
+
+Results:
+
+* `struct` ExtendClaimTermsReturn
+ * `struct` BatchReturn
+ * `uint32` SuccessCount - total successes in the batch.
+ * `struct` FailCodes\[] {`uint32` idx, `uint32` code} - list of failure code and index for all failures in batch.
+
+### RemoveExpiredClaims
+
+```go
+func RemoveExpiredClaims(params: RemoveExpiredClaimsParams) RemoveExpiredClaimsReturn {}
+```
+
+To remove a claim whose maximum term has elapsed.
+
+`uint` RemoveExpiredClaimsMethodNum = 2873373899.
+
+Params:
+
+* `struct` RemoveExpiredClaimsParams
+ * `uint64` Provider - the provider address.
+ * `uint64[]` ClaimIDs - A list of Claim IDs with an expired term. If no claims are specified, all eligible claims will be removed.
+
+Results:
+
+* `struct` RemoveExpiredClaimsReturn
+ * `uint64[]` Considered - a list of IDs of the claims that were either specified by the caller or discovered to be expired.
+ * `struct` BatchReturn
+ * `uint32` SuccessCount - total successes in the batch
+ * `struct` FailCodes\[] {`uint32` idx, `uint32` code} - list of failure code and index for all failures in batch.
diff --git a/reference/exchanges/README.md b/reference/exchanges/README.md
new file mode 100644
index 000000000..a1b3eb213
--- /dev/null
+++ b/reference/exchanges/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section contains reference information regarding how to integrate
+ Filecoin into an exchange.
+---
+
+# Exchanges
+
diff --git a/reference/exchanges/exchange-integration.md b/reference/exchanges/exchange-integration.md
new file mode 100644
index 000000000..f7bcff7c0
--- /dev/null
+++ b/reference/exchanges/exchange-integration.md
@@ -0,0 +1,283 @@
+---
+description: >-
+ This page lists the general steps and workflows you need to follow to offer
+ FIL on an exchange.
+---
+
+# Exchange integration
+
+## Running a Filecoin node
+
+If you plan to offer FIL on your exchange, you will need to run a Filecoin node. [Lotus](https://lotus.filecoin.io) is the reference implementation node for the Filecoin network, and as such, is currently the most production-ready implementation available.
+
+### Node setup and installation
+
+Follow the [Lotus installation guide](https://lotus.filecoin.io/lotus/install/prerequisites) to properly install the Lotus applications and launch a node. The basic steps are:
+
+* Prepare your hardware by meeting the minimal requirements.
+* Install dependencies.
+* Start the Lotus daemon and sync the chain by either:
+ * Syncing from scratch: in your lotus directory run `lotus daemon`
+ * Syncing from a full snapshot.
+ * Syncing from minimal snapshot
+
+A snapshot only has the state trees from the recent tipset (2000 epochs) onward and nothing before that tipset, which means it does not have all the historical states of the network. In addition, only a full snapshot has full state trees from a certain tipset.
+
+### Check sync status
+
+You can check the sync status by running `lotus sync status`. You are fully synced when the `Height` difference is `0`. Lotus will output any sync error.
+
+```shell
+lotus sync status
+
+> worker 28975:
+> Base: [bafy2bzacec2exqtdvxzzvikwg3sg67e57yaocpmrljkfxth3isjmruzd45vsa bafy2bzacecbbqdvidaf56eeth7oul2raycjpddvgtb23ywoyrqyh7ajulepai bafy2bzaceajtc3mgie5b72ivuiv3svwnbfq7kl2gnbni5tqc2ja6zi22vkttu bafy2bzacebf7oavffgiajf5goi6verhepsclhb7pcfwirkptu4dxrdoqshadw bafy2bzaceddtymcr5onnh63uznctks5hujpgzxcd45f5ef7oiuqocpmqs6rh2 bafy2bzacebuqm43mspki4ekdbu6xj663mpkrcoxtdss64t27fo77bg7pzbaia bafy2bzaceaxurgudmnphjcoz5sfad7yd63axyhmretqr5jjmvnicyu7betvvi bafy2bzaceali3oihrslxmnjdu2ysesxradorczjimruxdbkqidtcuag7wlr5a]
+> Target: [bafy2bzacedmswd73yn5faqzpbu5ofqinmh6fnzxfvazy52jrfthfhod4ssfgw] (1272951)
+> Height diff: 1
+> Stage: complete
+> Height: 1272951
+> Elapsed: 243.770396ms
+> worker 28976:
+> Base: [bafy2bzacedmswd73yn5faqzpbu5ofqinmh6fnzxfvazy52jrfthfhod4ssfgw]
+> Target: [bafy2bzacebxf27yxmzksno5ajkvp6shwojrx5mxsrtmlchrdchaqfg2wo7o3i] (1272951)
+> Height diff: 0
+> Stage: complete
+> Height: 1272951
+> Elapsed: 1.00020144s
+> worker 28977:
+> Base: []
+> Target: [] (0)
+> Height diff: 0
+> Stage: idle
+> Height: 0
+```
+
+You can run `lotus sync wait` to wait for the sync to be complete. Lotus will output `Done!` once your node is fully synced.
+
+## Basic network technology info
+
+The Filecoin network uses a [Proof of Storage (PoRep)](https://spec.filecoin.io/#section-glossary.proof-of-replication-porep) + [Proof of SpaceTime (PoSt)](https://spec.filecoin.io/#section-glossary.proof-of-spacetime-post) consensus algorithm. Time in the Filecoin network is divided into [epochs](https://spec.filecoin.io/#section-glossary.epoch), each lasting 30 seconds. A new set of blocks is produced for every epoch for a [tipset](https://spec.filecoin.io/#section-glossary.tipset). The hard finality of the Filecoin network is 900 epochs.
+
+## Accounts and wallets
+
+Filecoin uses an account-based model. There are 4 types of account prefixes:
+
+* `f0` for ID address
+* `f1` for Secp256k1 wallets
+* `f2`for [actor](https://spec.filecoin.io/#section-glossary.actor) accounts
+* `f3` for BLS wallets
+
+`f1`, `f2`, and `f3` prefixed addresses are called account addresses. An account address is activated when it first receives a transaction. `f0` prefixed addresses are mapped to each active account address.
+
+**Testnet addresses** Within a testnet, the address prefix is `t`. So ID addresses become `t0`, Secp256k1 wallets become `t1`, etc.
+
+## Signatures
+
+Filecoin currently uses two types of signatures:
+
+* ECDSA signatures over the Secp256k1 elliptic curve
+* BLS signatures over the BLS12-381 group of curves.
+
+Details and reference implementations can be found [in the Filecoin specification](https://spec.filecoin.io/#section-algorithms.crypto.signatures.signature-types).
+
+## Messages
+
+There are two message types:
+
+* [Signed messages](https://github.com/filecoin-project/lotus/blob/9deda06ec632da3f7a035cc63b9408de72c96f79/chain/types/signedmessage.go#L44)
+* [Unsigned messages](https://github.com/filecoin-project/lotus/blob/9deda06ec632da3f7a035cc63b9408de72c96f79/chain/types/message.go#L28).
+
+Messages are fully irreversible at 900 epochs. Waiting 200 epochs for message confirmation is acceptable.
+
+There are multiple gas fees associated with each message. Refer to the [practical guide to gas section of this blog post](https://filecoin.io/blog/filecoin-features-gas-fees/) for details.
+
+An `ExitCode` of `0` in the message receipt indicates that the message was sent successfully.
+
+### Mempool
+
+When a user sends a transaction to the network, it gets placed into the mempool queue. If a transaction doesn’t have enough gas, it stays in the mempool and doesn’t go anywhere. To new users, it looks like this transaction is lost forever. However, users can update the transaction with an updated `GasLimit`, `GasFeeCap`, and/or `GasPremium`. As long as you don’t change anything else in the transaction (`nonce`, `to`, `from`, `value`), then the transaction sitting in the mempool will get updated with the new gas allowance.
+
+**Expiration**
+
+There is no limit for how long a message can spend in the mempool. However, the mempool does get _cleaned_ when there are too many messages in it, starting with the messages with the least gas.
+
+### Automatic gas values
+
+When `GasFeeCap`, `GasPremium` and `MaxFee` are set to `0`, Lotus will do the gas estimation for the message with 25% overestimation for the gas limit based on the current network condition.
+
+Some JavaScript libraries attempt to estimate the gas fees before sending the transaction to the Filecoin network. However, they sometimes underestimate, leading to transactions getting stuck in the mempool. If you are noticing your transactions getting stuck in the mempool after sending them to the network using a JavaScript library, try setting `GasFeeCap`, `GasPremium`, and `MaxFee` to `0`.
+
+## Integration
+
+You can interact with the network by using Lotus CLI or using the [JSON-RPC APIs](https://lotus.filecoin.io/reference/basics/overview/). Follow the [API tokens guide](https://lotus.filecoin.io/docs/developers/api-access/) to set up API tokens on your node and grant necessary permissions. To find all CLI usage, run `lotus -h` in your lotus folder.
+
+You can find some other API client libraries developed by the Filecoin community [within the API client libraries page](https://lotus.filecoin.io/docs/developers/api-access/#api-client-libraries).
+
+### API examples
+
+Here are some Curl examples for connecting to a Lotus node using the JSON-RPC API:
+
+[**ChainHead**](https://lotus.filecoin.io/reference/lotus/chain/#chainhead)
+
+```shell
+curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $(cat ~/.lotus/token)" --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainHead",
+ "params":[
+
+ ],
+ "id":0
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+[**ChainGetTipSetByHeight**](https://lotus.filecoin.io/reference/lotus/chain/#chaingettipset)
+
+```shell
+curl -X POST -H "Content-Type: application/json"
+ -H "Authorization: Bearer $(cat ~/.lotus/token)"
+ --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainGetTipSetByHeight",
+ "params":[
+ 100000,
+ [
+ {
+ "/":"bafy2bzacecxm6lhhzem3wshktatwzrcqbvc3k3jepzz7a6wqyc7w3fvav256i"
+ }
+ ]
+ ],
+ "id":0
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+`bafy2bzacecxm6lhhzem3wshktatwzrcqbvc3k3jepzz7a6wqyc7w3fvav256i` is the block CID. This field is nullable.
+
+[**ChainGetParentMessages**](https://lotus.filecoin.io/reference/lotus/chain/#chaingetparentmessages)
+
+```shell
+curl -X POST -H "Content-Type: application/json"
+ -H "Authorization: Bearer $(cat ~/.lotus/token)"
+ --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.ChainGetParentMessages",
+ "params":[
+ {
+ "/":"bafy2bzacedplsg3tqrv7e3v5rssvq3qwbd3c6g3en55zpqnyrymexhynz6ixu"
+ }
+ ],
+ "id":0
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+`bafy2bzacedplsg3tqrv7e3v5rssvq3qwbd3c6g3en55zpqnyrymexhynz6ixu` is the block CID. You can pass in any one of the block CIDs included in the desired tipset.
+
+[**WalletNew**](https://lotus.filecoin.io/reference/lotus/wallet/#walletnew)
+
+```shell
+curl -X POST -H "Content-Type: application/json"
+ -H "Authorization: Bearer $(cat ~/.lotus/token)"
+ --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.WalletNew",
+ "params":[
+ 1
+ ],
+ "id":1
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+`1` for Secp256k1 account and `2` for BLS account.
+
+[**WalletBalance**](https://lotus.filecoin.io/reference/lotus/wallet/#walletbalance)
+
+```shell
+curl -X POST -H "Content-Type: application/json"
+ -H "Authorization: Bearer $(cat ~/.lotus/token)"
+ --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.WalletBalance",
+ "params":[
+ "f1d7x4euqwtlk2bqzhclr6gubkufezgddkqftsnky"
+ ],
+ "id":1
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+`f1d7x4euqwtlk2bqzhclr6gubkufezgddkqftsnky` is the account address.
+
+[**MpoolPushMessage**](https://lotus.filecoin.io/reference/lotus/mpool/#mpoolpushmessage)
+
+```shell
+curl -X POST -H 'Content-Type: application/json'
+ -H "Authorization: Bearer $(cat ~/.lotus/token)"
+ --data '{
+ "jsonrpc":"2.0",
+ "method":"Filecoin.MpoolPushMessage",
+ "params":[
+ {
+ "To":"t14pu4ogatxvok5727bd7ikp52mnnpxxxqicwyx7i",
+ "From":"t3w5afgpwisxkryfb676h7xyku57uupigdh3zdoxj3kyep53hmo5njykfatqx5rga75ra5oucbyczr7lbjaezq",
+ "Value":"1000000000000000000001",
+ "Method":0,
+ "Params":null,
+ "GasLimit":20000000,
+ "GasFeeCap":"0",
+ "GasPremium":"0",
+ "Nonce":0
+ },
+ {
+ "MaxFee":"0"
+ }
+ ],
+ "id":0
+}' http://127.0.0.1:1234/rpc/v0
+```
+
+`Method` ID of `0` with null `Params` is a balance transfer transaction. When the `GasFeeCap`, `GasPremium` and `MaxFee` is `0`, Lotus will do the gas estimation for the message with a 25% overestimation for the gas limit based on the current network condition. You can change this value via the `GasLimitOverestimation` field.
+
+## FAQ
+
+### How do I sign a message?
+
+Use [WalletSign](https://lotus.filecoin.io/reference/lotus/wallet/#walletsign) to sign the message and send the signed message using [MpoolPush](https://lotus.filecoin.io/reference/lotus/mpool/#mpoolpush).
+
+You may also use this [Filecoin signing tool library](https://github.com/Zondax/filecoin-signing-tools), written by [Zondax](https://www.zondax.ch/).
+
+### How do I retrieve the gas fees of a message?
+
+Call [StateReplay](https://lotus.filecoin.io/reference/lotus/state/#statereplay) and look up the `GasCost` in the response.
+
+### How to get the gas estimation of a message?
+
+You can estimate the gas cost of a message by calling [GasEstimateMessageGas](https://lotus.filecoin.io/reference/lotus/gas/#gasestimatemessagegas). This API estimates the gas limit with a 25% overestimation based on the network condition under the given tipset key. You can change this value via the `GasLimitOverestimation` field.
+
+### How do I ensure that all balance transfers in any messages are captured, including msig transfers?
+
+Call [StateCompute](https://lotus.filecoin.io/reference/lotus/state/#statecompute) or [StateReplay](https://lotus.filecoin.io/reference/lotus/state/#statereplay) and go through all the transactions in the execution trace. Whenever the value `!=0 && exit code == 0`, it indicates a balance transfer has occurred.
+
+### How can I check if my transaction is stuck?
+
+The Lotus RPC method to retrieve the list of transactions waiting on the mempool is `Filecoin.MpoolPending`. The RPC call is:
+
+```json
+{
+ "jsonrpc": "2.0",
+ "method": "Filecoin.MpoolPending",
+ "id": 1,
+ "params": [null]
+}
+```
+
+If you are using a JavaScript library, the method you need is `mpoolPending`.
+
+## Join the Filecoin Slack
+
+Join the [Filecoin Slack](https://filecoinproject.slack.com/signup) and post any questions you have in there.
+
+## Useful Links
+
+* [Filecoin website](https://filecoin.io)
+* [Filecoin Specs](https://spec.filecoin.io)
+* [Filecoin Networks](https://status.filecoin.io)
+* [Lotus](https://github.com/filecoin-project/lotus) - the reference implementation for the Filecoin network
+* [Block Explorers](https://docs.filecoin.io/networks/mainnet/explorers/)
diff --git a/reference/general/README.md b/reference/general/README.md
new file mode 100644
index 000000000..f6e52639a
--- /dev/null
+++ b/reference/general/README.md
@@ -0,0 +1,211 @@
+---
+description: >-
+ Helpful reference materials for the Filecoin specification, implementations,
+ and ecosystem.
+---
+
+# General
+
+## About Filecoin
+
+* [**Filecoin Specification**](https://spec.filecoin.io/) - technical specification for Filecoin protocol and its associated subsystems.
+* [**Engineering Filecoin’s Economy**](https://filecoin.io/2020-engineering-filecoins-economy-en.pdf) - the design of Filecoin’s economic incentives.
+* [**Filecoin Slack**](https://filecoin.io/slack) - announcement, open discussion of Filecoin.
+* [**Filecoin Community**](https://github.com/filecoin-project/community) - Filecoin community and ecosystem channels, discussion forums, and more.
+* [**Filecoin 中文社区**](https://github.com/filecoin-project/community-china) - resources and forum for the Chinese community, maintained and moderated by CoinSummer & PL.
+* [**Filecoin YouTube channel**](https://www.youtube.com/channel/UCPyYmtJYQwxM-EUyRUTp5DA) - various Filecoin workshops, conference talks, and meetups.
+
+## Filecoin Network
+
+### Node implementations
+
+* [**Lotus**](https://github.com/filecoin-project/lotus) - _**Recommended**_ most advanced implementation and [security audited](https://spec.filecoin.io/#section-intro.implementations-status), in GoLang.
+* Other implementations (In progress):
+ * [Fuhon](https://github.com/filecoin-project/cpp-filecoin) - C++
+ * [Forest](https://github.com/ChainSafe/forest) - Rust
+ * [Venus](https://github.com/filecoin-project/venus) (formerly `go-filecoin`) - also in GoLang ([Venus documentation](https://venus.filecoin.io/)).
+* Lotus Docker Hub Images (unofficial):
+ * [`glif/lotus`](https://github.com/glifio/filecoin-docker)
+ * [`ognots/lotus`](https://hub.docker.com/r/ognots/lotus) - `mainnet-v1.830-rc1`
+ * [`textile/lotus`](https://hub.docker.com/r/textile/lotus) - latest
+
+### Networks
+
+[`network.filecoin.io`](https://network.filecoin.io) - lists all current Filecoin networks and related information.
+
+* **Mainnet**
+ * [Mainnet network status](https://filecoin.statuspage.io/) - reports the status and incident of the Filecoin Mainnet.
+* **Calibration**
+ * Largest testnet which support 32 GiB and 64 GiB sectors.
+ * See [`#fil-net-calibration-announce`](https://filecoinproject.slack.com/archives/C01C5PT7ETC) for announcements and [`#fil-net-calibration-discuss`](https://filecoinproject.slack.com/archives/C01D42NNLMS) for questions and discussion in [Filecoin Slack](https://filecoin.io/slack).
+* **Local Devnet**
+ * [Run a Local Devnet using Lotus](https://lotus.filecoin.io/lotus/developers/local-network/).
+ * [Textile’s local devnet](https://docs.textile.io/powergate/localnet/#localnet-with-lotus-client) - uses a mocked sector builder for faster deal confirmation and it is for storage app prototyping.
+ * [Ganache for Filecoin](https://www.trufflesuite.com/docs/filecoin/ganache/overview) - local network to support development with Filecoin by various ways.
+
+## Network status
+
+Tools to check status and details of the network and chain.
+
+### Block explorers
+
+* [`Filfox.info`](https://filfox.info/en) - Mainnet
+* [Filscan](https://filscan.io) - Mainnet & Calibration
+* [Filscout](https://filscout.com) - Mainnet & Calibration
+* [Grafana](https://stats.filecoin.io/) - Network Statistics Dashboard
+ * [Mainnet](https://stats.filecoin.io/)
+ * [Calibration](https://stats.calibration.fildev.network/d/z6FtI92Zz/chain?orgId=1\&refresh=25s\&from=now-30m\&to=now\&kiosk)
+
+### Chain data and monitoring
+
+* [`dashboard.starboard.ventures`](https://dashboard.starboard.ventures/) - network health monitoring
+* [Filstats.io](https://filstats.io) - node telemetry monitoring - add your node!
+* Orphan blocks (not an official protocol term in the Filecoin Spec) - Blocks without rewards that were either mined on the wrong tipset that the network fails to deliver in time, or a mix resulting in reorgs.
+ * [Orphan Block Statistics](https://filscout.com/en/orphan-block)
+ * [Orphan Block List](https://filscout.com/en/orphan-block/alllist)
+* [Sentinel Project](https://lilium.sh/) - Filecoin Network Monitoring and Analysis System
+
+### Storage/Deals status
+
+* [`storage.filecoin.io`](https://storage.filecoin.io/) - general storage summary of Filecoin
+* [`filecoin.tools`](https://filecoin.tools) - check your CID’s storage deal status. Also provides an API - check [`filecoin.tools/docs`](https://filecoin.tools/docs) for reference
+* [`file.app`](https://file.app/) - Filecoin storage provider analytics
+* [Deals list at `Filfox.io`](https://filfox.info/en/deal)
+
+## Storage Web Applications
+
+Web-based applications that store your data on Filecoin. No command-line or coding experience required.
+
+* [Estuary](https://estuary.tech) allows uploading and storing content on the Filecoin network directly from your browser. Allows anyone with public data to store and retrieve using a few API calls.
+* [**Slate.host**](https://slate.host) - a storage application on Filecoin to collect, organize, and link files together and share them, listed on [Product Hunt here](https://www.producthunt.com/posts/slate-f195dcdd-18e2-4dc2-8c70-45208ccbb862) on GitHub at [`filecoin-project/slate`](https://github.com/filecoin-project/slate/)
+* [ChainSafe Files](https://files.chainsafe.io/) - Dropbox-style UI, login with OAuth or general MetaMask
+* [`File.video`](https://file.video/) - video hosting with decentralized transcoding from LivePeer
+* [Starling Framework for Data Integrity](https://www.starlinglab.org/)
+ * to securely capture, store and verify human history
+ * learn more at [`starlinglab.org/78days`](https://www.starlinglab.org/78days/) or the [Filecoin blog interview](https://filecoin.io/blog/starling-framework/)
+ * [`Starlingstorage.io`](https://starlingstorage.io/) - API + CLI that simplified storing data to a local Lotus node for preservation use cases.
+
+## APIs & Developer tools
+
+Developer tools, API clients & storage services that developers can use to build on Filecoin.
+
+### Storage APIs for app builders
+
+* [Estuary](https://estuary.tech) - a simple IPFS node that integrates with Filecoin. For infrastructure operators, developers and users.
+ * [Estuary documentation](https://docs.estuary.tech)
+ * [Estuary www](https://github.com/application-research/estuary-www) - a simple web app example
+ * [Estuary Rclone](https://docs.estuary.tech/Learn/tutorial-managing-files-with-rclone) - sync to and from different cloud storage providers and Estuary
+* [`NFT.storage`](https://nft.storage/) - beta service from Protocol Labs for storing off-chain _NFT_ data on IPFS and Filecoin.
+* [`Web3.storage`](https://web3.storage/) - beta service from Protocol Labs for storing off-chain _dApp_ data on IPFS and Filecoin, with help from Pinata (a fast IPFS Pinning Service) to [distribute the content across IPFS](https://www.pinata.cloud/blog/protocol-labs-and-pinata)
+* [`Slate.host`](https://github.com/filecoin-project/slate/#developer-api) - has a Developer API that allows you upload files with an account
+* [`Textile.io's tools`](https://docs.textile.io/) - suite of tools for interacting with IPFS and Filecoin
+ * includes The Hub, Buckets, ThreadsDB, Powergate, [Tableland](https://blog.textile.io/tableland-is-coming-build-web3-with-sql/) (new) and Filecoin bridges to Polygon and NEAR
+ * [Powergate](https://docs.textile.io/powergate/) - infrastructure tool for using IPFS for hot storage and retrieval and Filecoin for cold, with deal helpers and other convenience features
+ * [Textile Buckets](https://docs.textile.io/buckets/) - simplified cloud bucket store on IPFS with archive to Filecoin option using Powergate under the hood
+* [Fleek’s tools](https://fleek.co/) - provides access to Fleek’s hosted services using IPFS, Textile, Filecoin, Ethereum, etc.
+ * [Space SDK](https://docs.fleek.co/space-sdk/overview/) - modular JavaScript/Typescript library and set of interfaces that packages tools and features enabled by Open Web protocols like IPFS, Textile, Filecoin, and Ethereum.
+ * [Space Daemon](https://docs.fleek.co/space-daemon/overview/) - packages together IPFS, Textile Threads and Buckets, and Textile Powergate into one easy to install background service
+
+### Message signing tools
+
+* [Filecoin Signing Tools](https://github.com/Zondax/filecoin-signing-tools) - _**Recommended**_ a pure JS or Rust / WASM / JSONRPC library for creating signed messages apart from a Filecoin node.
+ * Also available on npm at [`@zondax/filecoin-signing-tools`](https://www.npmjs.com/package/@zondax/filecoin-signing-tools)
+ * For an open source example of how to use it see the [Glif web wallet](https://github.com/glifio/wallet) on GitHub.
+* [Filecoin JS Signer](https://github.com/blitslabs/filecoin-js-signer) - \[_last update: June 2021_] pure TS / JS library for creating signed messages and interacting with Filecoin’s built-in Actors (like Payment Channel and Multisig), used by the [Filecoin Loans grant project](https://github.com/blitslabs/filecoin.loans-monorepo).
+ * On npm at [`@blitslabs/filecoin-js-signer`](https://www.npmjs.com/package/@blitslabs/filecoin-js-signer)
+
+### Wallet-related tools
+
+* [Filecoin Rosetta API Proxy](https://github.com/Zondax/rosetta-filecoin) - [Rosetta](https://www.rosetta-api.org/) is an API standard created by Coinbase for a consistent interface to many chains for wallets and exchanges.
+* [FilSnap MetaMask Plugin](https://metamask.io/flask/) - MetaMask has a new plugin system currently still in beta that developers can try out. The MM UI will not change but Filecoin keys are in the MM vault. Try it with [https://filsnap.chainsafe.io/](https://filsnap.chainsafe.io/)
+
+### Node Infrastructure & APIs
+
+> NOTE: making deep calls into the chain’s history may take some time to return and it may be more efficient to use a chain database (e.g. used by block explorers) that stores the chain’s history and is optimized for queries.
+
+* [Glif nodes](https://lotus.filecoin.io/lotus/developers/glif-nodes/) and [Infura](https://docs.infura.io/infura/networks/filecoin) - Hosted endpoints to Filecoin mainnet and testnet.
+ * These endpoints support read-only calls and `MPoolPush()` for sending signed transactions to the network (which can be signed using the [Message signing tools](https://docs.filecoin.io/reference/general/overview/#message-signing-tools)).
+* [**Lotus JSON-RPC API**](https://lotus.filecoin.io/lotus/get-started/what-is-lotus/) - Lotus offers the full feature set of its capabilities through API.
+ * [lotus API Postman sample](https://documenter.getpostman.com/view/4872192/SWLh5mUd?version=latest) - (shows sample wallet calls only)
+
+**Scalable endpoint hosting**
+
+For running a node cluster of load balanced Lotus JSON RPC API endpoints.
+
+* [Filecoin-chart](https://github.com/glifio/filecoin-chart) (k8 cluster) - Helm chart for hosting Lotus Node clients.
+
+**Filecoin API clients**
+
+* [`Filecoin.js`](https://github.com/filecoin-shipyard/filecoin.js) (outdated) - higher-level JS library for interacting with Lotus via JSON-RPC API.
+* [`js-lotus-client`](https://github.com/filecoin-shipyard/js-lotus-client) - lower-level JS wrapper for basic parsing of the Lotus JSON RPC API.
+* [`lotus-json-rpc-provider`](https://www.npmjs.com/package/@coinsummer/lotus-jsonrpc-provider) (outdated) - wraps the Lotus API in TypeScript.
+
+**Storage provider index API**
+
+* [Filrep.io API](https://filrep.io/api) - A suite of RESTFul JSON endpoints to discover the best storage provider to make deals with.
+* [Textile storage provider Index](https://blog.textile.io/introducing-the-miner-index/) - API and CLI to find storage providers by price, observed deals, speed from North American nodes.
+
+### Data prep tools
+
+* [CAR files](https://ipld.io/specs/transport/car/) - automatically used on Lotus import for less than 32 and 64 GiB, Filecoin archive format serialized from DAGs, see also [offline deals for large datasets](https://lotus.filecoin.io/tutorials/lotus/large-files/).
+* [`go-graphsplit`](https://github.com/filedrive-team/go-graphsplit) - FileDrive chunks a larger single DAG for storage in default Filecoin 32 and 64 GiB sector sizes.
+* [IPFS](https://ipfs.tech) - you can use `ipfs add` with an IPFS node then [add the CID from a Filecoin node](https://lotus.filecoin.io/tutorials/lotus/import-data-from-ipfs/).
+
+### Databases using IPFS and Filecoin
+
+* [Tableland](https://tableland.xyz/) - (new) create familiar SQL tables to read and write dynamic NFT metadata or application data for dApps, from Textile
+* [OrbitDB](https://orbitdb.org/) - decentralized multi-party database on IPFS with multi-party syncing with topic pubsub and CRDTs. Filecoin integration using Powergate available at [`orbit-db-powergate-io`](https://github.com/filecoin-shipyard/orbit-db-powergate-io).
+* [ThreadsDB](https://docs.textile.io/threads/) - decentralized multi-party database for user-siloed data on IPFS, from Textile
+
+### Other developer tools
+
+* [`js-rle`](https://github.com/willscott/js-rle) - RLE+ Spec. Learn about [`rle-bitset-encoding` in the Filecoin Spec](https://spec.filecoin.io/#section-appendix.data\_structures.rle-bitset-encoding).
+* [Truffle for Filecoin](https://www.trufflesuite.com/docs/filecoin/truffle/quickstart) - building apps to preserve files to IPFS and Filecoin.
+
+## Storage providers
+
+### Storage provider reputation systems
+
+* [Codefi Storage](https://storage.codefi.network/) - view the Filecoin Storage Market, storage provider info, asking price, completed deals.
+* [FIL Swan](https://www.filswan.com/) - for offline deals, storage provider info, prices, offline deal acceptance.
+* [Filrep.io](https://filrep.io/) - Index of online storage providers and their pricing, ranked by power and reputation score.
+* [SpaceGap](https://spacegap.github.io/) - shows storage proof deadlines and sector details for the top 50 storage providers.
+
+### Storage provider tools
+
+* [Bidbot](https://github.com/textileio/bidbot) - A Filecoin Network sidecar to bid in storage deal auctions.
+* [Filgas.io](https://fgas.io/) - real-time Filecoin mining gas queries.
+* [Lotus Farcaster](https://github.com/s0nik42/lotus-farcaster) - Prometheus, Grafana and Python monitoring dashboard.
+
+### Storage client and miner programs
+
+* [Filecoin storage providers (SPs)](https://sp.filecoin.io/) - Interested in becoming an SP? A boot camp, grants and expert advice is available here!
+* [Filecoin Plus Verified Data Program](https://github.com/filecoin-project/filecoin-plus-client-onboarding)
+ * incentivizes valuable data stored on Filecoin with a social trust network for verified data
+ * Clients can apply to Notaries to receive DataCap and incentivize storage providers at 10x to their quality-adjusted power, increasing probability of block rewards
+* [Slingshot Competition](https://slingshot.filecoin.io/) - ended in 2021 and has now become [Filecoin Evergreen](https://evergreen.filecoin.io/)
+
+### Retrieval Market resources (WIP experiments)
+
+The Retrieval Market is in early development; research and development teams are actively exploring improvements.
+
+* [Browser Retrieval Client](https://github.com/filecoin-shipyard/browser-retrieval)
+* [Browser Retrieval Client using WASM](https://github.com/jimpick/lotus-retrieve-api-daemon)
+* [ChainSafe’s Full Node Retrieval Client](https://github.com/ChainSafe/fil-secondary-retrieval-markets)
+* [Decentralized Data Delivery Markets - Open Problems and RFCs](https://github.com/protocol/ResNetLab/blob/master/OPEN\_PROBLEMS/DECENTRALIZED\_DATA\_DELIVERY\_MARKETS.md)
+* [July 2020 Retrieval Markets Workshop](https://www.youtube.com/watch?v=eUcsZ1JS9pM) - list of [Sessions](https://docs.google.com/document/d/17bEHP2CHkQFYYQnl7YpatAOh6kWxjlNgihdc4F66SVM/edit)
+
+## Ecosystem galleries
+
+* [**Community Projects Showcase**](https://github.com/filecoin-project/community/#ecosystem-projects) - updated news about ecosystem projects
+* [Filecoin Ecosystem](https://ecosystem.filecoin.io/)
+
+### Hackathons
+
+* [Upcoming Hackathons](https://hackathons.filecoin.io/)
+ * Keep a lookout for upcoming hackathons!
+ * All winners are eligible for [Next Steps Grants](https://github.com/filecoin-project/devgrants/blob/master/microgrants/microgrants.md) after the Hackathon!
+
+### Grants and accelerators
+
+* [**Filecoin Developer Grants**](https://filecoin.io/grants) - ongoing monthly developer grant program to support open source projects and new [RFPs](https://github.com/filecoin-project/devgrants/tree/master/rfps) on Filecoin, funded by the [Filecoin Foundation](https://fil.org)
diff --git a/reference/general/glossary.md b/reference/general/glossary.md
new file mode 100644
index 000000000..6f0874e73
--- /dev/null
+++ b/reference/general/glossary.md
@@ -0,0 +1,185 @@
+---
+description: Definitions and usage for Filecoin terminology
+---
+
+# Glossary
+
+## Address
+
+In the Filecoin network, an _address_ is a unique cryptographic value that serves to publicly identify a user. This value, a public key, is paired with a corresponding private key. The mathematical relationship between the two keys is such that access to the private key allows the creation of a signature that can be verified with the public key. Filecoin specifically employs the Boneh–Lynn–Shacham (BLS) signature scheme for this purpose.
+
+## Block
+
+In a blockchain, a _block_ is the fundamental unit of record. Each block is cryptographically linked to one or more previous blocks. Blocks typically contain [messages](https://docs.filecoin.io/reference/general/glossary/#message) relating changes to some state (for example, financial records) tracked by the blockchain.
+
+## Blockchain
+
+Fundamentally, a _blockchain_ is a system of record in which new records, or [blocks](https://docs.filecoin.io/reference/general/glossary/#block) are cryptographically linked to preceding records. This construction is a foundational component of secure, verifiable, and distributed transaction ledgers.
+
+## Block height
+
+The _height_ of a [block](https://docs.filecoin.io/reference/general/glossary/#block) corresponds to the number of [epochs](https://docs.filecoin.io/reference/general/glossary/#epoch) elapsed before the block was added to the blockchain. The height of the Filecoin [blockchain](https://docs.filecoin.io/reference/general/glossary/#blockchain) is defined to be the maximum height of any block in the blockchain.
+
+## Capacity commitment
+
+If a storage provider doesn’t find any available deal proposals appealing, they can alternatively make a _capacity commitment_, filling a [sector](https://docs.filecoin.io/reference/general/glossary/#sector) with arbitrary data, rather than with client data. Maintaining this sector allows the storage provider to provably demonstrate that they are reserving space on behalf of the network.
+
+## CommP
+
+The commitment phase of the Proof-of-Replication (PoRep) process. PoRep is a mechanism used to verify that a storage provider is storing data on behalf of a client by requiring the provider to prove that they have replicated the client’s data to their storage space.
+
+## Content IDentifier (CID)
+
+A self-describing format for referencing data in distributed information systems by its _contents_, rather than its _location_ using cryptographic hashing and self-describing formats. It is a core component of IPFS and IPLD, which are in turn components of Filecoin.
+
+## Collateral
+
+In order to enter into a [storage deal](https://docs.filecoin.io/reference/general/glossary/#deal), a [storage provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) is required to provide [FIL](https://docs.filecoin.io/reference/general/glossary/#fil) as _collateral_, to be paid out as compensation to a client in the event that the provider fails to uphold their storage commitment.
+
+## Deal
+
+Two participants in the Filecoin network can enter into a _deal_ in which one party contracts the services of the other. The Filecoin specification currently details _storage deals_ (in which one party agrees to store data for the other for a specified length of time) and _retrieval deals_ (in which one party agrees to transmit specified data to the other).
+
+## Election
+
+Every [epoch](https://docs.filecoin.io/reference/general/glossary/#epoch), a small subset of Filecoin [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) are _elected_ to mine a new [block](https://docs.filecoin.io/reference/general/glossary/#block) for the Filecoin blockchain. A provider’s probability of being elected is roughly proportional to the share of the Filecoin network’s total storage capacity that they contribute.
+
+## Epoch
+
+Time in the Filecoin blockchain is discretized into _epochs_ that are currently thirty seconds in length. Every epoch, a subset of storage providers are elected to each add a new block to the Filecoin blockchain via [Winning Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#winning-proof-of-spacetime-winningpost).
+
+## FIL
+
+_FIL_ is the name of the Filecoin unit of currency; it is alternatively denoted by the Unicode symbol for an integral with a double stroke (⨎).
+
+## Faucet
+
+A _faucet_ is a service that provides free [FIL](https://docs.filecoin.io/reference/general/glossary/#fil). Typically, faucets are run for the benefit of new users in a network, providing them with the necessary seed capital to begin making transactions.
+
+## Fault
+
+When a [storage provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) fails to complete [Window Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#window-proof-of-spacetime-windowpost) for a given sector, the Filecoin network registers a _fault_ for that sector, and the provider is [_slashed_](https://docs.filecoin.io/reference/general/glossary/#slash). If a storage provider does not resolve the fault quickly, the network assumes they have abandoned their commitment.
+
+## Filecoin
+
+The term _Filecoin_ is used generically to refer to the Filecoin project, protocol, and network.
+
+## Finality
+
+Finality refers to the immutability of messages and state recorded to the Filecoin blockchain. As new blocks are added to the blockchain, it becomes more and more difficult for older blocks to be altered, until they become effectively impossible to modify. The _finality period_ is the amount of time that must elapse before a block is considered completely immutable. In the current [mainnet](https://docs.filecoin.io/reference/general/glossary/#mainnet), this is configured as 900 [epochs](https://docs.filecoin.io/reference/general/glossary/#epoch).
+
+## Gas
+
+_Gas_ is a property of a [message](https://docs.filecoin.io/reference/general/glossary/#message), corresponding to the resources involved in including that message in a given [block](https://docs.filecoin.io/reference/general/glossary/#block). For each message included in a block, the block’s creator extracts a fee from the message’s sender; this fee is proportional to the message’s gas.
+
+## Mainnet
+
+A portmanteau of “main” and “network,” _mainnet_ is a term used to refer to the predominant public-facing network of the Filecoin project and community. The mainnet embodies an expectation of widespread adoption and permanence; changes to its protocol are subject to the adoption of the network participants.
+
+If used as a proper noun, capitalize the term: _“I am providing on Mainnet.”_
+
+## Message
+
+The term _message_ is used to refer to data stored as part of a [block](https://docs.filecoin.io/reference/general/glossary/#block). A block can contain several messages.
+
+## Merkle Directed Acyclic Graph
+
+Abbreviated as _Merkle DAG_. A graph data structure where nodes:
+
+* Have a unique identifier that is the hash of the node’s contents
+* Are directionally related to other nodes
+* Never form a closed loop
+
+Merkle DAGs are a fundamental component for the representation of relationships between content-addressed data in IPLD, which is in turn used by Filecoin.
+
+## Miner
+
+The Filecoin project uses the term _provider_ to refer to participants in the network who provide a service of value to a client. Other blockchains, like Ethereum and Bitcoin, use the term _miner_. At present, the Filecoin specification recognizes two provider types: [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) and [retrieval providers](https://docs.filecoin.io/reference/general/glossary/#retrieval-provider).
+
+## Pledged storage
+
+Storage capacity that a provider has promised to reserve for the Filecoin network via [Proof-of-Replication](https://docs.filecoin.io/reference/general/glossary/#proof-of-replication-porep) is termed _pledged storage_.
+
+## Proof-of-Storage
+
+Many blockchain networks are underpinned by the notion that participants supply something of value to the blockchain - a contribution that is hard to fake, but which, if actually made, can be trivially verified. Blockchains based on this approach are often said to require “Proof-of-X”, where X is the valued contribution. The Filecoin blockchain values contributions of storage capacity; it is predicated upon a novel _Proof-of-Storage_ construction, distinguishing it from other blockchains that, as is most often the case, require a contribution of computing power.
+
+As a term, Proof-of-Storage refers to the design elements of the Filecoin protocol that allow one to guarantee (to some very high tolerance) that participants that claim to be contributing a given amount of storage are indeed fulfilling that pledge. In fact, Filecoin’s Proof-of-Storage construction provides for a much stronger claim, allowing one to efficiently verify that a participant is storing a _particular piece of data_, without requiring that one have a copy of the file itself.
+
+_Note_: “proof” here is used in an informal sense - typically, these proofs take the form of a probabilistic argument, rather than a concrete proof; that is, it might _technically_ be possible to convince other participants that one is making a contribution one is not, but the possibility is so vanishingly slight as to border on impossibility.
+
+## Proof-of-Replication (PoRep)
+
+_Proof-of-Replication_ is a procedure by which a [storage provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) can prove to the Filecoin network that they have created a unique copy of some piece of data on the network’s behalf.
+
+## Proof-of-Spacetime (PoSt)
+
+_Proof-of-Spacetime_ is a procedure by which a [storage-provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) can prove to the Filecoin network they continue to store a unique copy of some data on behalf of the network. Proof-of-Spacetime manifests in two distinct varieties in the present Filecoin specification: [Window Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#window-proof-of-spacetime-windowpost) and [Winning Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#winning-proof-of-spacetime-winningpost).
+
+## Quality-adjusted storage power
+
+The storage power a [storage provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) earns from a storage deal offered by a [verified client](https://docs.filecoin.io/reference/general/glossary/#verified-client) will be augmented by a multiplier. Power totals that take into account this multiplier are termed _quality adjusted_.
+
+## Retrieval provider
+
+A _retrieval provider_ is a Filecoin participant that enters retrieval [deals](https://docs.filecoin.io/reference/general/glossary/#deal) with clients, agreeing to supply a client with a particular file in exchange for [FIL](https://docs.filecoin.io/reference/general/glossary/#fil). Note that unlike [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider), retrieval providers are not additionally rewarded with the ability to add blocks to the Filecoin blockchain; their only reward is the fee they extract from the client.
+
+## Seal
+
+_Sealing_ is one of the fundamental building blocks of the Filecoin protocol. It is a computation-intensive process performed over a [sector](https://docs.filecoin.io/reference/general/glossary/#sector) that results in a unique representation of the sector. The properties of this new representation are essential to the [Proof-of-Replication](https://docs.filecoin.io/reference/general/glossary/#proof-of-replication-porep) and the [Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#proof-of-spacetime-post) procedures.
+
+## Sector
+
+Storage providers store data on behalf of the Filecoin network in fixed-size blocks of data called _sectors_.
+
+## Slash
+
+When a [fault](https://docs.filecoin.io/reference/general/glossary/#fault) is registered for a [sector](https://docs.filecoin.io/reference/general/glossary/#sector), the Filecoin network will _slash_ the [storage provider](https://docs.filecoin.io/reference/general/glossary/#storage-provider) that is supposed to be storing the sector; that is, it will assess penalties to the provider (to be paid out of the [collateral](https://docs.filecoin.io/reference/general/glossary/#collateral) fronted by the provider) for their failure to uphold their pledge of storage. When slashing takes place, the power a provider earns for the associated sector is subtracted from the provider’s total power for the purposes of [election](https://docs.filecoin.io/reference/general/glossary/#election).
+
+## Storage provider
+
+A _storage provider_ is a Filecoin participant that stores data on behalf of the network. Storage providers are rewarded for this service through payments by clients that contract their services, as well as by periodic authorization to extend the Filecoin [blockchain](https://docs.filecoin.io/reference/general/glossary/#blockchain) with [blocks](https://docs.filecoin.io/reference/general/glossary/#block) of their own creation. When they create a block, storage providers are rewarded with newly minted [FIL](https://docs.filecoin.io/reference/general/glossary/#fil), as well as the transaction fees they can levy on other participants seeking to include [messages](https://docs.filecoin.io/reference/general/glossary/#message) in the block.
+
+## Storage power
+
+A [storage provider’s](https://docs.filecoin.io/reference/general/glossary/#storage-provider) _storage power_ is a value roughly proportional to the amount of storage capacity they make available on behalf of the network via [capacity commitments](https://docs.filecoin.io/reference/general/glossary/#capacity-commitment) or [storage deals](https://docs.filecoin.io/reference/general/glossary/#deal). Storage power is used to select storage providers for rewards in proportion to their contributions to the total network storage capacity.
+
+## Zero-knowledge succinct non-interactive argument of knowledge (zk-SNARK)
+
+An _argument of knowledge_ is a construction by which one party, called the _prover_, can convince another, the _verifier_, that the prover has access to some piece of information. There are several possible constraints on such constructions:
+
+* A _non-interactive_ argument of knowledge has the requirement that just a single message, sent from the prover to the verifier, should serve as a sufficient argument.
+* A _zero-knowledge_ argument of knowledge has the requirement that the verifier should not need access to the knowledge the prover has access to in order to verify the prover’s claim.
+* A _succinct_ argument of knowledge is one that can be “quickly” verified, and which is “small”, for appropriate definitions of both of those terms.
+
+A zero-knowledge, succinct non-interactive argument of knowledge (zk-SNARK) embodies all of these properties. Filecoin utilizes these constructions to enable its distributed network to efficiently verify that [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) are storing files they pledged to store, without requiring the verifiers to maintain copies of these files themselves.
+
+## Testnet
+
+A portmanteau of “test” and “network”, _testnet_ is a term used to refer to one of the [primary Filecoin testing networks](https://network.filecoin.io/#calibration).
+
+Note: if used as a proper noun, capitalize the term. For example, “I am providing on Testnet.”
+
+## Tipset
+
+A [tipset](https://filecoin.io/blog/tipsets-family-based-approach-to-consensus/) is a set of [blocks](https://docs.filecoin.io/reference/general/glossary/#block) that each have the same [height](https://docs.filecoin.io/reference/general/glossary/#block-height) and parent tipset; the Filecoin [blockchain](https://docs.filecoin.io/reference/general/glossary/#blockchain) is a chain of tipsets, rather than a chain of blocks.
+
+Each tipset is assigned a weight corresponding to the amount of storage the network is provided per the commitments encoded in the tipset’s blocks. The consensus protocol of the network directs nodes to build on top of the heaviest chain.
+
+By basing its blockchain on tipsets, Filecoin can allow multiple [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) to create blocks in the same [epoch](https://docs.filecoin.io/reference/general/glossary/#epoch), increasing network throughput. By construction, this also provides network security: a node that attempts to intentionally prevent the valid blocks of a second node from making it onto the canonical chain runs up against the consensus preference for heavier chains.
+
+## Verified client
+
+To further incentivize the storage of “useful” data over simple [capacity commitments](https://docs.filecoin.io/reference/general/glossary/#capacity-commitment), [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) have the additional opportunity to compete for special [deals](https://docs.filecoin.io/reference/general/glossary/#deal) offered by [verified clients](https://docs.filecoin.io/reference/general/glossary/#verified-client). Such clients are certified with respect to their intent to offer deals involving the storage of meaningful data, and the power a storage provider earns for these deals is augmented by a multiplier.
+
+## Window Proof-of-Spacetime (WindowPoSt)
+
+_Window Proof-of-Spacetime_ (WindowPoSt) is the mechanism by which the commitments made by [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) are audited. It sees each 24 hour period broken down into a series of windows. Correspondingly, each storage provider’s set of pledged [sectors](https://docs.filecoin.io/reference/general/glossary/#sector) is partitioned into subsets, one subset for each window. Within a given window, each storage provider must submit a [Proof-of-Spacetime](https://docs.filecoin.io/reference/general/glossary/#proof-of-spacetime-post) for each sector in their respective subset. This requires ready access to each of the challenged sectors, and will result in a proof compressed via [zk-SNARK](https://docs.filecoin.io/reference/general/glossary/#zero-knowledge-succinct-non-interactive-argument-of-knowledge-zk-snark) being published to the Filecoin [blockchain](https://docs.filecoin.io/reference/general/glossary/#blockchain) as a [message](https://docs.filecoin.io/reference/general/glossary/#message) in a [block](https://docs.filecoin.io/reference/general/glossary/#block). In this way, every sector of [pledged storage](https://docs.filecoin.io/reference/general/glossary/#pledged-storage) is audited at least once in any 24 hour period, and a permanent, verifiable, and public record attesting to each storage provider’s continued commitment is kept.
+
+The Filecoin network expects constant availability of stored data. Failing to submit WindowPoSt for a sector will result in a [fault](https://docs.filecoin.io/reference/general/glossary/#fault), and the storage provider supplying the sector will be [slashed](https://docs.filecoin.io/reference/general/glossary/#slash).
+
+## Winning Proof-of-Spacetime (WinningPoSt)
+
+_Winning Proof-of-Spacetime_ (WinningPoSt) is the mechanism by which [storage providers](https://docs.filecoin.io/reference/general/glossary/#storage-provider) are rewarded for their contributions to the Filecoin network. At the beginning of each [epoch](https://docs.filecoin.io/reference/general/glossary/#epoch), a small number of storage providers are [elected](https://docs.filecoin.io/reference/general/glossary/#election) to each mine a new [block](https://docs.filecoin.io/reference/general/glossary/#block). As a requirement for doing so, each provider is tasked with submitting a compressed [Proof-of-Storage](https://docs.filecoin.io/reference/general/glossary/#proof-of-storage) for a specified [sector](https://docs.filecoin.io/reference/general/glossary/#sector). Each elected provider who successfully creates a block is granted [FIL](https://docs.filecoin.io/reference/general/glossary/#fil), as well as the opportunity to charge other Filecoin participants fees to include [messages](https://docs.filecoin.io/reference/general/glossary/#message) in the block.
+
+Storage providers who fail to do this in the necessary window will forfeit their opportunity to mine a block, but will not otherwise incur penalties for their failure to do so.
diff --git a/reference/general/specifications.md b/reference/general/specifications.md
new file mode 100644
index 000000000..98058a37d
--- /dev/null
+++ b/reference/general/specifications.md
@@ -0,0 +1,15 @@
+---
+description: >-
+ This page quickly covers what the Filecoin Specification is, and how you can
+ access it.
+---
+
+# Specifications
+
+The Filecoin specification is the technical document that outlines the details of how the Filecoin network operates, including the rules for participating in the network, the format of the transactions that are used to manage data storage, and the algorithms and data structures that are used to store and retrieve data on the network. The specification is an important reference for developers who are building applications on top of Filecoin, as it provides them with the necessary information to create integrations that are compatible with the rest of the network.
+
+The Filecoin specification is a _living_ document, with frequent updates and changes as the Filecoin network grows. The specification is available to view at [specs.filecoin.io](https://spec.filecoin.io/).
+
+[specs.filecoin.io](https://spec.filecoin.io/)
+
+You can suggest changes to the specification [over on GitHub](https://github.com/filecoin-project/specs).
diff --git a/reference/general/tools.md b/reference/general/tools.md
new file mode 100644
index 000000000..7a31b3627
--- /dev/null
+++ b/reference/general/tools.md
@@ -0,0 +1,47 @@
+---
+description: >-
+  This page lists a collection of tools and resources you can use to build on top
+ of the Filecoin network using the FVM.
+---
+
+# Tools
+
+### Infrastructure libraries
+
+These infrastructure libraries and tools exist to speed up the development of software on top of the Filecoin network.
+
+#### Filecoin signing tools
+
+The [Filecoin signing tools](https://github.com/Zondax/filecoin-signing-tools) provide basic functionality for signing Filecoin transactions in pure JavaScript, WASM and Rust. Currently, the Rust and WASM implementations support:
+
+* Secp256k1
+* BLS
+* CBOR-JSON serialization of transactions
+
+Support for multisignature transaction signing is currently in progress, and the pure JavaScript implementation is less complete than the Rust and WASM implementations. Learn more in the [official documentation](https://docs.zondax.ch/filecoin-signing-tools/).
+
+#### Filecoin addresses
+
+The _filecoin-address_ library is a JavaScript implementation of the Filecoin address type, and can create new address instances, encode addresses, and decode and validate checksums. For further information, including how to install and use, see the [GitHub repository](https://github.com/glifio/modules/tree/primary/packages/filecoin-address).
+
+### Built-in Native Actors
+
+These are some tools that developers will find useful when dealing with built-in actors.
+
+* [WASM Actors Repo](https://github.com/filecoin-project/builtin-actors)
+* [FVM Example Actors](https://github.com/filecoin-project/fvm-example-actors)
+* [FVM AssemblyScript SDK](https://github.com/Zondax/fvm-as-sdk) by Zondax
+* [FVM TinyGo SDK](https://www.notion.so/Filecoin-Virtual-Machine-FVM-Developer-Resources-94cabfd650184f4b9664bd4974e4d329) by Venus (IPFSForce)
+* [FVM High-level Rust SDK](https://github.com/polyphene/fvm-rs-sdk) by Polyphene
+* [Tooling by Glif](https://glif.io/)
+
+### Filecoin improvement proposals
+
+Filecoin improvement proposals (FIPs) are a way for the Filecoin community to discuss potential changes and improvements to the Filecoin network.
+
+* [About](https://github.com/filecoin-project/FIPs)
+* [Filecoin Slack channel for FIP discussion](https://filecoinproject.slack.com/archives/C01EU76LPCJ)
+* [Program Overview & Application](https://airtable.com/shr48kiPOqjwxzX6u)
+* [FVM Foundry Cohort Update Recordings](https://www.youtube.com/playlist?list=PL\_0VrY55uV18DBdFIkN0jdBMF8nadVxWQ)
+* [FVM Foundry Early Builders F/0 Cohort Showcase - Oct 3 2022](https://drive.google.com/file/d/1JLR45vSNScZX7edz9DxwlpYGnVfGm30Q/view?usp=sharing)
+* [FVM Foundry Early Builders F/1 Cohort Kick Off - Oct 5 2022](https://drive.google.com/file/d/1mV0PMunDUvIBqmuNw9VjUJIf4zE4z9LV/view?usp=sharing)
diff --git a/reference/json-rpc/README.md b/reference/json-rpc/README.md
new file mode 100644
index 000000000..f6177f5c8
--- /dev/null
+++ b/reference/json-rpc/README.md
@@ -0,0 +1,87 @@
+---
+description: >-
+ Find out how to manage and interact with the Filecoin network using the
+ standard JSON-RPC API.
+---
+
+# JSON-RPC
+
+## Quick start
+
+The easiest way to test the API is to use Curl commands. A Curl command to the Filecoin network looks something like this:
+
+```curl
+curl --location --request POST '' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"",
+ "params": [],
+ "id":1
+}'
+```
+
+## Step-by-step example
+
+1. In a terminal window, use Curl to request the current chain head from a public [Glif](https://glif.io) node.\
+
+
+* ```shell
+ curl -X POST 'https://api.node.glif.io' \
+ -H 'Content-Type: application/json' \
+ --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.ChainHead","params":[]}'
+ ```
+
+ ```shell
+ {"jsonrpc":"2.0","result":{"Cids":[{"/":"bafy2bzaceayoigaf3v5muqmknpjfkguse43jp4t2zxhpmykhqynqhkdgpgybc"},{"/":"bafy2bzacecnrtzlhn6h75gm7tozhzuw77plvdhniwzfj7wgmyuju6wn573h22"},{"/":"bafy2bzacecygiaxfsqv7ecb2gvodzh74eret3pchwe5e4j5a3mzlwasvndi6i"},{"/":"bafy2bzacebe477tdmijfse4je2g63gnnkdgzj3ftq6zbygd7toszkrsjts6uu"},{"/":"bafy2bzacedoe6hcxy2cgqzbg4p7qolbd5imbjpjnz2tj4n7o3kw2md4uv2ttq"},{"/":"bafy2bzacec7wbqvskwvolireljmufszdu5nk37yyg4qtxgnrwbyipgoenmhc6"},{"/":"bafy2bzaceahxdiauteywlbjnwj3ntr72qcbamtq3nbvjzyn5wruithpyqyxbm"}],"Blocks":[{"Miner":"f0693008","Ticket":{"VRFProof":"uLR0LHfNBAfQzyYUVBiIEXzyblPv3yPIEsJQGTpaAvO1ZriPZ7wC2IFpw7mrz1RvDQEfsgRXGxb6APTRvrPiFEAe35RFNLKC9SYb64PNcDYwGY4de5LdlHfyUv+Ovwg5"}...
+ ```
+
+ The ChainHead endpoint doesn’t require any input parameters, so we’ve left `params` an empty array `[]`.
+* The above command will output a large chunk of JSON data. You can use [JSON processor JQ](https://stedolan.github.io/jq/) to _prettify_ the output:
+
+ ```
+ ```
+
+2. ```shell
+ curl -X POST 'https://api.node.glif.io' \
+ -H 'Content-Type: application/json' \
+ --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.ChainHead","params":[]}' \
+ | jq
+ ```
+
+ ```json
+ {
+ "jsonrpc": "2.0",
+ "result": {
+ "Cids": [
+ {
+ "/": "bafy2bzacecrbhy67by4upktab6rvbgd3w5jml7zog4ifoaupo35yo4rbbc4am"
+ },
+ {
+ "/": "bafy2bzacecm42csr2ysmgpj54lz762iom4n4gcafkerijirzsfzq3jni2gqyu"
+ }
+ ],
+ "Blocks": [
+ {
+ "Miner": "f0152747",
+ "Ticket": {
+ ...
+ ```
+
+## Permissions
+
+Each method has specific permissions that must be met before you can receive a response from a Filecoin node. Methods with the `read` permission can be called by anyone at anytime, without the need for a token. All other permissions require you to send an authentication token along with your request.
+
+* `read`: Read node state, no private data.
+* `write`: Write to local store / chain, and read permissions.
+* `sign`: Use private keys stored in wallet for signing, read and write permissions.
+* `admin`: Manage permissions, read, write, and sign permissions.
+
+## Authentication
+
+Each node implementation has different ways to generate and manage authentication tokens. Take a look at your node’s specific documentation:
+
+* [Lotus](https://lotus.filecoin.io)
+* [Venus](https://venus.filecoin.io)
+
+If you are using a node provider service like [Glif](https://glif.io) or [Chain.love](https://chain.love), take a look at your provider’s documentation to find out how to manage authentication tokens.
diff --git a/reference/json-rpc/auth.md b/reference/json-rpc/auth.md
new file mode 100644
index 000000000..90007b1bd
--- /dev/null
+++ b/reference/json-rpc/auth.md
@@ -0,0 +1,40 @@
+# Auth
+
+## AuthNew
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ [
+ "write"
+ ]
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+## AuthVerify
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response:
+
+
+```json
+[
+ "write"
+]
+```
diff --git a/reference/json-rpc/beacon.md b/reference/json-rpc/beacon.md
new file mode 100644
index 000000000..bb90dd2ba
--- /dev/null
+++ b/reference/json-rpc/beacon.md
@@ -0,0 +1,32 @@
+---
+description: >-
+ The Beacon method group contains methods for interacting with the random
+ beacon (DRAND)
+---
+
+# Beacon
+
+## BeaconGetEntry
+
+BeaconGetEntry returns the beacon entry for the given Filecoin epoch. If the entry has not yet been produced, the call will block until the entry becomes available
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 10101
+]
+```
+
+Response:
+
+
+```json
+{
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+}
+```
diff --git a/reference/json-rpc/chain.md b/reference/json-rpc/chain.md
new file mode 100644
index 000000000..e61405ca8
--- /dev/null
+++ b/reference/json-rpc/chain.md
@@ -0,0 +1,750 @@
+---
+description: >-
+ The Chain method group contains methods for interacting with the blockchain,
+ but that do not require any form of state computation.
+---
+
+# Chain
+
+## ChainDeleteObj
+
+ChainDeleteObj deletes node referenced by the given CID
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
+## ChainExport
+
+ChainExport returns a stream of bytes with CAR dump of chain data. The exported chain data includes the header chain from the given tipset back to genesis, the entire genesis state, and the most recent ‘nroots’ state trees. If oldmsgskip is set, messages from before the requested roots are also not included.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 10101,
+ true,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+## ChainGetBlock
+
+ChainGetBlock returns the block specified by the given CID.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "WinPoStProof": [
+ {
+ "PoStProof": 8,
+ "ProofBytes": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+}
+```
+
+## ChainGetBlockMessages
+
+ChainGetBlockMessages returns messages stored in the specified block.
+
+Note: If there are multiple blocks in a tipset, it’s likely that some messages will be duplicated. It’s also possible for blocks in a tipset to have different messages from the same sender at the same nonce. When that happens, only the first message (in a block with lowest ticket) will be considered for execution
+
+NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+
+DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET Use ChainGetParentMessages, which will perform correct message deduplication
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "BlsMessages": [
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ],
+ "SecpkMessages": [
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ],
+ "Cids": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ]
+}
+```
+
+## ChainGetGenesis
+
+ChainGetGenesis returns the genesis tipset.
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+## ChainGetMessage
+
+ChainGetMessage reads a message referenced by the specified CID from the chain blockstore.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+## ChainGetMessagesInTipset
+
+ChainGetMessagesInTipset returns the messages stored in the current tipset
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Cid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ }
+]
+```
+
+## ChainGetNode
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Cid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Obj": {}
+}
+```
+
+## ChainGetParentMessages
+
+ChainGetParentMessages returns messages stored in parent tipset of the specified block.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Cid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ }
+]
+```
+
+## ChainGetParentReceipts
+
+ChainGetParentReceipts returns receipts for messages in parent tipset of the specified block. The receipts in the list returned is one-to-one with the messages returned by a call to ChainGetParentMessages with the same blockCid.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ }
+]
+```
+
+## ChainGetPath
+
+ChainGetPath returns a set of revert/apply operations needed to get from one tipset to another, for example:
+
+
+```plaintext
+ to
+ ^
+from tAA
+ ^ ^
+tBA tAB
+ ^---*--^
+ ^
+ tRR
+```
+
+Would return `[revert(tBA), apply(tAB), apply(tAA)]`
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Type": "string value",
+ "Val": {
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+ }
+ }
+]
+```
+
+## ChainGetRandomnessFromBeacon
+
+ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response: `"Bw=="`
+
+## ChainGetRandomnessFromTickets
+
+ChainGetRandomnessFromTickets is used to sample the chain for randomness.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response: `"Bw=="`
+
+## ChainGetTipSet
+
+ChainGetTipSet returns the tipset specified by the given TipSetKey.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+## ChainGetTipSetByHeight
+
+ChainGetTipSetByHeight looks back for a tipset at the specified epoch. If there are no blocks at the specified epoch, a tipset at an earlier epoch will be returned.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+## ChainHasObj
+
+ChainHasObj checks if a given CID exists in the chain blockstore.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `true`
+
+## ChainHead
+
+ChainHead returns the current head of the chain.
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+## ChainNotify
+
+ChainNotify returns channel with chain head updates. First message is guaranteed to be of length `1`, and type `current`.
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "Type": "string value",
+ "Val": {
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+ }
+ }
+]
+```
+
+## ChainReadObj
+
+ChainReadObj reads IPLD nodes referenced by the specified CID from chain blockstore and returns raw bytes.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+## ChainSetHead
+
+ChainSetHead forcefully sets current chain head. Use with caution.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
+## ChainStatObj
+
+ChainStatObj returns statistics about the graph referenced by ‘obj’. If ‘base’ is also specified, then the returned stat will be a diff between the two objects.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Size": 42,
+ "Links": 42
+}
+```
+
+## ChainTipSetWeight
+
+ChainTipSetWeight computes weight for the specified tipset.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
diff --git a/reference/json-rpc/client.md b/reference/json-rpc/client.md
new file mode 100644
index 000000000..d79bfdef5
--- /dev/null
+++ b/reference/json-rpc/client.md
@@ -0,0 +1,1071 @@
+---
+description: >-
+ The Client methods all have to do with interacting with the storage and
+ retrieval markets as a client
+---
+
+# Client
+
+## ClientCalcCommP
+
+ClientCalcCommP calculates the CommP for a specified file
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 1024
+}
+```
+
+## ClientCancelDataTransfer
+
+ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+## ClientCancelRetrievalDeal
+
+ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ 5
+]
+```
+
+Response: `{}`
+
+## ClientDataTransferUpdates
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+## ClientDealPieceCID
+
+ClientDealPieceCID calculates the CommP and data size of the specified CID
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "PayloadSize": 9,
+ "PieceSize": 1032,
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+}
+```
+
+## ClientDealSize
+
+ClientDealSize calculates real deal data size
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "PayloadSize": 9,
+ "PieceSize": 1032
+}
+```
+
+## ClientFindData
+
+ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ null
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Err": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "Size": 42,
+ "MinPrice": "0",
+ "UnsealPrice": "0",
+ "PricePerByte": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+ }
+]
+```
+
+## ClientGenCar
+
+ClientGenCar generates a CAR file for the specified file.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Path": "string value",
+ "IsCAR": true
+ },
+ "string value"
+]
+```
+
+Response: `{}`
+
+## ClientGetDealInfo
+
+ClientGetDealInfo returns the latest information about a given deal.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": 42,
+ "Message": "string value",
+ "DealStages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "ExpectedDuration": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ },
+ "Provider": "f01234",
+ "DataRef": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 42,
+ "PricePerEpoch": "0",
+ "Duration": 42,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ }
+}
+```
+
+## ClientGetDealStatus
+
+ClientGetDealStatus returns status given a code
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 42
+]
+```
+
+Response: `"string value"`
+
+## ClientGetDealUpdates
+
+ClientGetDealUpdates returns the status of updated deals
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": 42,
+ "Message": "string value",
+ "DealStages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "ExpectedDuration": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ },
+ "Provider": "f01234",
+ "DataRef": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 42,
+ "PricePerEpoch": "0",
+ "Duration": 42,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ }
+}
+```
+
+## ClientGetRetrievalUpdates
+
+ClientGetRetrievalUpdates returns status of updated retrieval deals
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "PayloadCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ID": 5,
+ "PieceCID": null,
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "Status": 0,
+ "Message": "string value",
+ "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "BytesReceived": 42,
+ "BytesPaidFor": 42,
+ "TotalPaid": "0",
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "Event": 5
+}
+```
+
+## ClientHasLocal
+
+ClientHasLocal indicates whether a certain CID is locally stored.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `true`
+
+## ClientImport
+
+ClientImport imports file under the specified path into filestore.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ImportID": 50
+}
+```
+
+## ClientListDataTransfers
+
+ClientListTransfers returns the status of all ongoing transfers of data
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ }
+]
+```
+
+## ClientListDeals
+
+ClientListDeals returns information about the deals made by the local client.
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": 42,
+ "Message": "string value",
+ "DealStages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "ExpectedDuration": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ },
+ "Provider": "f01234",
+ "DataRef": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 42,
+ "PricePerEpoch": "0",
+ "Duration": 42,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+]
+```
+
+## ClientListImports
+
+ClientListImports lists imported files and their root CIDs
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "Key": 50,
+ "Err": "string value",
+ "Root": null,
+ "Source": "string value",
+ "FilePath": "string value",
+ "CARPath": "string value"
+ }
+]
+```
+
+## ClientListRetrievals
+
+ClientListRetrievals returns information about retrievals made by the local client
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "PayloadCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ID": 5,
+ "PieceCID": null,
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "Status": 0,
+ "Message": "string value",
+ "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "BytesReceived": 42,
+ "BytesPaidFor": 42,
+ "TotalPaid": "0",
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": [
+ {
+ "Name": "string value",
+ "Description": "string value",
+ "CreatedTime": "0001-01-01T00:00:00Z",
+ "UpdatedTime": "0001-01-01T00:00:00Z",
+ "Logs": [
+ {
+ "Log": "string value",
+ "UpdatedTime": "0001-01-01T00:00:00Z"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "Event": 5
+ }
+]
+```
+
+## ClientMinerQueryOffer
+
+ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ null
+]
+```
+
+Response:
+
+
+```json
+{
+ "Err": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "Size": 42,
+ "MinPrice": "0",
+ "UnsealPrice": "0",
+ "PricePerByte": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+}
+```
+
+## ClientQueryAsk
+
+ClientQueryAsk returns a signed StorageAsk from the specified miner.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Price": "0",
+ "VerifiedPrice": "0",
+ "MinPieceSize": 1032,
+ "MaxPieceSize": 1032,
+ "Miner": "f01234",
+ "Timestamp": 10101,
+ "Expiry": 10101,
+ "SeqNo": 42
+}
+```
+
+## ClientRemoveImport
+
+ClientRemoveImport removes file import
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ 50
+]
+```
+
+Response: `{}`
+
+## ClientRestartDataTransfer
+
+ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+## ClientRetrieve
+
+ClientRetrieve initiates the retrieval of a file, as specified in the order.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash",
+ "Size": 42,
+ "FromLocalCAR": "string value",
+ "Total": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Client": "f01234",
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+ },
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response: `{}`
+
+## ClientRetrieveTryRestartInsufficientFunds
+
+ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel which are stuck due to insufficient funds
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `{}`
+
+## ClientRetrieveWithEvents
+
+ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel of status updates.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash",
+ "Size": 42,
+ "FromLocalCAR": "string value",
+ "Total": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Client": "f01234",
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+ },
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Event": 5,
+ "Status": 0,
+ "BytesReceived": 42,
+ "FundsSpent": "0",
+ "Err": "string value"
+}
+```
+
+## ClientStartDeal
+
+ClientStartDeal proposes a deal with a miner.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+Response: `null`
+
+## ClientStatelessDeal
+
+ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+Response: `null`
diff --git a/reference/json-rpc/create.md b/reference/json-rpc/create.md
new file mode 100644
index 000000000..67ad0548c
--- /dev/null
+++ b/reference/json-rpc/create.md
@@ -0,0 +1,18 @@
+# Create
+
+## CreateBackup
+
+CreateBackup creates node backup under the specified file name. The method requires that the lotus daemon is running with the LOTUS\_BACKUP\_BASE\_PATH environment variable set to some path, and that the path specified when calling CreateBackup is within the base path
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
diff --git a/reference/json-rpc/eth.md b/reference/json-rpc/eth.md
new file mode 100644
index 000000000..035e12134
--- /dev/null
+++ b/reference/json-rpc/eth.md
@@ -0,0 +1,944 @@
+---
+description: >-
+ These methods are used for Ethereum-compatible JSON-RPC calls. For in-depth
+ information on each of these methods, take a look at the official Ethereum API
+ documentation.
+---
+
+# Eth
+
+## EthAccounts
+
+This method is intended to return a list of addresses owned by client. However, `eth_accounts` will always return an empty array `[]` since Filecoin does not manage Ethereum private keys.
+
+* Permissions: read
+* Inputs: none
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_accounts",
+ "params":[],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": [],
+ "id": 1
+}
+```
+
+## EthBlockNumber
+
+Returns the number of most recent block.
+
+* Permissions: read
+* Inputs: none
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_blockNumber",
+ "params":[],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x3bb7",
+ "id": 1
+}
+```
+
+## EthCall
+
+Executes a new message call immediately without creating a transaction on the blockchain.
+
+This documentation section is a work-in-progress.
+
+* Permissions: read
+* Input:
+ 1. Object - The transaction call object:
+ * from: DATA, 20 Bytes - (optional) The address the transaction is sent from.
+ * to: DATA, 20 Bytes - The address the transaction is directed to.
+ * gas: QUANTITY - (optional) Integer of the gas provided for the transaction execution. eth\_call consumes zero gas, but this parameter may be needed by some executions.
+ * gasPrice: QUANTITY - (optional) Integer of the gasPrice used for each paid gas
+ * value: QUANTITY - (optional) Integer of the value sent with this transaction
+ * data: DATA - (optional) Hash of the method signature and encoded parameters. For details see [Ethereum Contract ABI in the Solidity documentation](https://docs.soliditylang.org/en/latest/abi-spec.html)
+ 2. QUANTITY|TAG - integer block number, or the string `"latest"`, `"earliest"` or `"pending"`. See the [default block parameter](https://ethereum.org/en/developers/docs/apis/json-rpc/#default-block-parameter).
+
+Inputs:
+
+
+```json
+[
+ {
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "gasPrice": "0x0",
+ "value": "0x0",
+ "data": "0x07"
+ },
+ "latest"
+]
+```
+
+## EthChainId
+
+Returns the currently configured chain ID, a value used in replay-protected transaction signing as introduced by EIP-155.
+
+* Permissions: read
+* Inputs: none
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_chainId",
+ "params":[],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x7ab7",
+ "id": 1
+}
+```
+
+## EthEstimateGas
+
+Generates and returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. Note that the estimate may be significantly more than the amount of gas actually used by the transaction, for a variety of reasons including EVM mechanics and node performance.
+
+This documentation section is a work-in-progress.
+
+* Permissions: read
+* Inputs:
+ * `object`:
+ * `from`: DATA, 20 Bytes - (optional) The address the transaction is sent from.
+    * `to`: DATA, 20 Bytes - The address to which the transaction is directed to.
+    * `gas`: QUANTITY - (optional) Integer of the gas provided for the transaction execution. eth\_call consumes zero gas, but this parameter may be needed by some executions.
+ * `gasPrice`: QUANTITY - (optional) Integer of the gasPrice used for each paid gas.
+ * `value`: QUANTITY - (optional) Integer of the value sent with this transaction
+ * `data`: DATA - (optional) Hash of the method signature and encoded parameters. For details see Ethereum Contract ABI
+ * `QUANTITY|TAG` - integer block number, or the string “latest”, “earliest” or “pending”.
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+ --header 'Content-Type: application/json' \
+ --data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_estimateGas",
+ "params":[{
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "gasPrice": "0x0",
+ "value": "0x0",
+ "data": "0x07"
+ }],
+ "id":1
+ }' | jq
+```
+
+## EthFeeHistory
+
+Returns a collection of historical gas information.
+
+* Permissions: read
+* Inputs:
+ * `BLOCKCOUNT` - Number of blocks in the requested range. Between 1 and 1024 blocks can be requested in a single query. Less than requested may be returned if not all blocks are available.
+ * `NEWESTBLOCK` - Highest number block of the requested range.
+ * `REWARDPERCENTILES` - (optional) A monotonically increasing list of percentile values to sample from each block’s effective priority fees per gas in ascending order, weighted by gas used.
+* Returns:
+ * `object`:
+ * `OLDESTBLOCK` - Lowest number block of the returned range.
+ * `BASEFEEPERGAS` - An array of block base fees per gas. This includes the next block after the newest of the returned range, because this value can be derived from the newest block. Zeroes are returned for pre-EIP-1559 blocks.
+ * `GASUSEDRATIO` - An array of block gas used ratios. These are calculated as the ratio of gasUsed and gasLimit.
+ * `REWARD` - (Optional) An array of effective priority fees per gas data points from a single block. All zeroes are returned if the block is empty.
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_feeHistory",
+ "params": [ "0x5", "latest", [ 0 ] ],
+ "id":1
+}' | jq
+```
+
+## EthGasPrice
+
+Returns the current price per gas in wei.
+
+* Permissions: read
+* Inputs: none
+
+Example:
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_gasPrice",
+ "params": [],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x30bbf",
+ "id": 1
+}
+```
+
+## EthGetBalance
+
+Returns the balance of the account of a given address.
+
+Permissions: read
+
+Input:
+
+1. String - 20 Bytes - Address.
+2. String - Either the hex value of a block number OR One of the following block tags:
+ * `pending`: a sample next block built by the client on top of latest and containing the set of transactions usually taken from local mempool. Intuitively, you can think of these as blocks that have not been mined yet.
+ * `latest`: the most recent block in the canonical chain observed by the client, this block may be reorganized out of the canonical chain even under healthy/normal conditions.
+ * `safe`: the most recent crypto-economically secure block, cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is “unlikely” to be reorganized.
+ * `finalized`: the most recent crypto-economically secure block, that has been accepted by >2/3 of validators. Cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is very unlikely to be reorganized.
+ * `earliest` - The lowest numbered block the client has available. Intuitively, you can think of this as the first block created.
+
+
+```curl
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_getBalance",
+ "params": ["0x3e1F70090cf4476d788C5259F50F89E9fB88bF1a", "latest"],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x0",
+ "id": 1
+}
+```
+
+## EthGetBlockByHash
+
+Returns information about a block by tipset, also known as a block hash.
+
+* Permissions: read
+* Inputs:
+ * `array`:
+ * `string`: Tipset of block
+ * `boolean`: If true it returns the full transaction objects, if false only the hashes of the transactions. Defaults to false.
+
+
+```json
+[
+ "0x0707070707070707070707070707070707070707070707070707070707070707",
+ true
+]
+```
+
+## EthGetBlockByNumber
+
+Returns information about a block by block number.
+
+* Permissions: read
+* Inputs:
+ * `QUANTITY|TAG`: integer of a block number, or the string `earliest`, `latest` or `pending`, as in the default block parameter.
+ * `Boolean`: If `true` it returns the full transaction objects, if `false` only the hashes of the transactions.
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_getBlockByNumber",
+ "params":["0x82c9", true],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": {
+ "hash": "0xf9a8005d886e6a458003835f7c1fda53c666777fef19ce42db2614c9848adb3f",
+ "parentHash": "0x8aec5230892f858c4c7ee860ca9de1542c9a59e5b809a71125ae3e26c36b7997",
+ "sha3Uncles": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "miner": "0x0000000000000000000000000000000000000000",
+ "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
+ "difficulty": "0x0",
+ "totalDifficulty": "0x0",
+ "number": "0x82c9",
+ "gasLimit": "0x2540be400",
+ "gasUsed": "0x0",
+ "timestamp": "0x63857585",
+ "extraData": "",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "nonce": "0x0000000000000000",
+ "baseFeePerGas": "0x64",
+ "size": "0x0",
+ "transactions": [],
+ "uncles": []
+ },
+ "id": 1
+}
+```
+
+## EthGetBlockTransactionCountByHash
+
+Returns the number of messages in the tipset.
+
+* Permissions: read
+* Inputs:
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_getBlockTransactionCountByHash",
+ "params":["0xf9a8005d886e6a458003835f7c1fda53c666777fef19ce42db2614c9848adb3f"],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x0",
+ "id": 1
+}
+```
+
+## EthGetBlockTransactionCountByNumber
+
+Returns the number of transactions in a block matching the given tipset.
+
+* Permissions: read
+* Inputs:
+ * `string`: Either the hex value of a block number OR one of the following block tags:
+ * `pending`: A sample next block built by the client on top of latest and containing the set of transactions usually taken from local mempool. Intuitively, you can think of these as blocks that have not been mined yet.
+ * `latest`: The most recent block in the canonical chain observed by the client, this block may be reorganized out of the canonical chain even under healthy/normal conditions.
+ * `safe`: The most recent crypto-economically secure block, cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is “unlikely” to be reorganized.
+ * `finalized`: The most recent crypto-economically secure block, that has been accepted by >2/3 of validators. Cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is very unlikely to be reorganized.
+ * `earliest`: The lowest numbered block the client has available. Intuitively, you can think of this as the first block created.
+
+Example:
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_getBlockTransactionCountByNumber",
+ "params": [ "0x5" ],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "result": "0x0"
+}
+```
+
+## EthGetCode
+
+Returns code at a given address.
+
+This section of documentation is a work-in-progress.
+
+* Permissions: read
+* Inputs:
+ * `string`: 20 byte address.
+ * `string`: Either the hex value of a block number OR one of the following block tags:
+ * `pending` - A sample next block built by the client on top of latest and containing the set of transactions usually taken from local mempool. Intuitively, you can think of these as blocks that have not been mined yet.
+ * `latest` - The most recent block in the canonical chain observed by the client, this block may be reorganized out of the canonical chain even under healthy/normal conditions.
+ * `safe` - The most recent crypto-economically secure block, cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is “unlikely” to be reorganized.
+ * `finalized` - The most recent crypto-economically secure block, that has been accepted by >2/3 of validators. Cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is very unlikely to be reorganized.
+ * `earliest` - The lowest numbered block the client has available. Intuitively, you can think of this as the first block created.
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_getCode",
+ "params": [ "0x0707070707070707070707070707070707070707", "string value" ],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "string",
+ "result": "string"
+}
+```
+
+## EthGetFilterChanges
+
+Polling method for a filter, which returns an array of logs which occurred since last poll.
+
+Permissions: write
+
+Inputs:
+
+
+```json
+[
+ "c5564560217c43e4bc0484df655e9019"
+]
+```
+
+Response:
+
+
+```json
+[
+ {}
+]
+```
+
+## EthGetFilterLogs
+
+Returns event logs matching filter with given id. (requires write perm since timestamp of last filter execution will be written)
+
+Permissions: write
+
+Inputs:
+
+
+```json
+[
+ "c5564560217c43e4bc0484df655e9019"
+]
+```
+
+Response:
+
+
+```json
+[
+ {}
+]
+```
+
+## EthGetLogs
+
+Returns event logs matching given filter spec.
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "fromBlock": "2301220",
+ "address": [
+ "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031"
+ ],
+ "topics": null
+ }
+]
+```
+
+Response:
+
+
+```json
+[
+ {}
+]
+```
+
+## EthGetStorageAt
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x0707070707070707070707070707070707070707",
+ "0x07",
+ "string value"
+]
+```
+
+Response: `"0x07"`
+
+## EthGetTransactionByBlockHashAndIndex
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "0x5"
+]
+```
+
+Response:
+
+
+```json
+{
+ "chainId": "0x5",
+ "nonce": "0x5",
+ "hash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockNumber": "0x5",
+ "transacionIndex": "0x5",
+ "from": "0x0707070707070707070707070707070707070707",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "value": "0x0",
+ "type": "0x5",
+ "input": "0x07",
+ "gas": "0x5",
+ "maxFeePerGas": "0x0",
+ "maxPriorityFeePerGas": "0x0",
+ "v": "0x07",
+ "r": "0x07",
+ "s": "0x07"
+}
+```
+
+## EthGetTransactionByBlockNumberAndIndex
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x5",
+ "0x5"
+]
+```
+
+Response:
+
+
+```json
+{
+ "chainId": "0x5",
+ "nonce": "0x5",
+ "hash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockNumber": "0x5",
+ "transacionIndex": "0x5",
+ "from": "0x0707070707070707070707070707070707070707",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "value": "0x0",
+ "type": "0x5",
+ "input": "0x07",
+ "gas": "0x5",
+ "maxFeePerGas": "0x0",
+ "maxPriorityFeePerGas": "0x0",
+ "v": "0x07",
+ "r": "0x07",
+ "s": "0x07"
+}
+```
+
+## EthGetTransactionByHash
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"
+]
+```
+
+Response:
+
+
+```json
+{
+ "chainId": "0x5",
+ "nonce": "0x5",
+ "hash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockNumber": "0x5",
+ "transacionIndex": "0x5",
+ "from": "0x0707070707070707070707070707070707070707",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "value": "0x0",
+ "type": "0x5",
+ "input": "0x07",
+ "gas": "0x5",
+ "maxFeePerGas": "0x0",
+ "maxPriorityFeePerGas": "0x0",
+ "v": "0x07",
+ "r": "0x07",
+ "s": "0x07"
+}
+```
+
+## EthGetTransactionCount
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x0707070707070707070707070707070707070707",
+ "string value"
+]
+```
+
+Response: `"0x5"`
+
+## EthGetTransactionReceipt
+
+Returns the receipt of a transaction by transaction hash.
+
+Permissions: read
+
+Inputs:
+
+
+```json
+[
+ "0x0707070707070707070707070707070707070707070707070707070707070707"
+]
+```
+
+Response:
+
+
+```json
+{
+ "transactionHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "transactionIndex": "0x5",
+ "blockHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockNumber": "0x5",
+ "from": "0x0707070707070707070707070707070707070707",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "status": "0x5",
+ "contractAddress": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "cumulativeGasUsed": "0x5",
+ "gasUsed": "0x5",
+ "effectiveGasPrice": "0x0",
+ "logsBloom": "0x07",
+ "logs": [
+ {
+ "address": "0x0707070707070707070707070707070707070707",
+ "data": "0x07",
+ "topics": [
+ "0x07"
+ ],
+ "removed": true,
+ "logIndex": "0x5",
+ "transactionIndex": "0x5",
+ "transactionHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockHash": "0x0707070707070707070707070707070707070707070707070707070707070707",
+ "blockNumber": "0x5"
+ }
+ ],
+ "type": "0x5"
+}
+```
+
+## EthMaxPriorityFeePerGas
+
+Returns a fee per gas that is an estimate of how much you can pay as a priority fee, or ’tip’, to get a transaction included in the current block. Generally you will use the value returned from this method to set the `maxPriorityFeePerGas` in a subsequent transaction that you are submitting.
+
+* Permissions: read
+* Inputs: none
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_maxPriorityFeePerGas",
+ "params": [],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x312da",
+ "id": 1
+}
+```
+
+## EthNewBlockFilter
+
+Installs a persistent filter to notify when a new block arrives.
+
+* Permissions: write
+* Inputs: none
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_newBlockFilter",
+ "params": [ ],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "5f1623cd-5901-4e4c-a0ec-b2c37c113b8e",
+ "id": 1
+}
+```
+
+## EthNewFilter
+
+Creates a filter object, based on filter options, to notify when the state changes (logs). Unlike [`eth_newBlockFilter`](https://docs.filecoin.io/reference/json-rpc/eth/#ethnewblockfilter) which notifies you of all new blocks, you can pass in filter options to track new logs matching the topics specified. To check if the state has changed, call [`eth_getFilterChanges`](https://docs.filecoin.io/reference/json-rpc/eth/#ethgetfilterchanges).
+
+* Permissions: write
+* Inputs:
+ * `array`:
+ * `object`:
+ * `string`: BlockHash. Using blockHash is equivalent to fromBlock = toBlock = the block number with hash blockHash. If blockHash is present in the filter criteria, then neither fromBlock nor toBlock are allowed.
+ * `array`:
+ * `string`: Contract address or a list of addresses from which logs should originate.
+ * `string`: Either the hex value of a block number OR one of the following block tags:
+ * `pending`: A sample next block built by the client on top of latest and containing the set of transactions usually taken from local mempool. Intuitively, you can think of these as blocks that have not been mined yet.
+ * `latest`: The most recent block in the canonical chain observed by the client, this block may be reorganized out of the canonical chain even under healthy/normal conditions.
+ * `safe`: The most recent crypto-economically secure block, cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is “unlikely” to be reorganized.
+ * `finalized`: The most recent crypto-economically secure block, that has been accepted by >2/3 of validators. Cannot be reorganized outside of manual intervention driven by community coordination. Intuitively, this block is very unlikely to be reorganized.
+ * `earliest`: The lowest numbered block the client has available. Intuitively, you can think of this as the first block created.
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_newFilter",
+ "params": [{
+ "fromBlock": "2301220",
+ "address": [
+ "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031"
+ ],
+ "topics": null
+ }],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "4d5981d6-5da2-40c3-85e0-18ecc6b3fd5d",
+ "id": 1
+}
+```
+
+## EthNewPendingTransactionFilter
+
+Creates a filter in the node, to notify when new pending transactions arrive. To check if the state has changed, call [`eth_getFilterChanges`](https://docs.filecoin.io/reference/json-rpc/eth/#ethgetfilterchanges).
+
+* Permissions: write
+* Inputs: none
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_newPendingTransactionFilter",
+ "params": [],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "6512e91c-2bba-49be-ab56-903a84eee5b2",
+ "id": 1
+}
+```
+
+## EthProtocolVersion
+
+Returns the current ethereum protocol version.
+
+* Permissions: read
+* Inputs: none
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_protocolVersion",
+ "params": [],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": "0x12",
+ "id": 1
+}
+```
+
+## EthSendRawTransaction
+
+Creates a new message call transaction or a contract creation for signed transactions. Returns 32 Bytes - the transaction hash, or the zero hash if the transaction is not yet available.
+
+* Permissions: read
+* Inputs:
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_sendRawTransaction",
+ "params": ["0x07"],
+ "id":1
+}' | jq
+```
+
+```json
+```
+
+## EthSubscribe
+
+Subscribe to different Ethereum event types like newHeads, logs, pendingTransactions, and minedTransactions using WebSockets. Creates a new subscription for desired events. Sends data as soon as it occurs.
+
+* Permissions: write
+* Inputs:
+ * Event types: specifies the type of event to listen to (ex: new pending transactions, logs, etc).
+ * Optional parameters: optional parameters to include to describe the type of event to listen to (`address` for example).
+
+## EthUninstallFilter
+
+Uninstalls a filter with given id.
+
+* Permissions: write
+* Inputs:
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_uninstallFilter",
+ "params": ["6512e91c-2bba-49be-ab56-903a84eee5b2"],
+ "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": true,
+ "id": 1
+}
+```
+
+## EthUnsubscribe
+
+Unsubscribe from different Ethereum event types with a regular RPC call with `eth_unsubscribe` as the method and the `subscriptionId` as the first parameter.
+
+* Permissions: write
+* Inputs:
+ * `Subscription ID`: as previously returned from an `eth_subscribe` call.
+
+
+```shell
+curl --location --request POST 'https://api.node.glif.io' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_unsubscribe",
+ "params": ["b62df77831484129adf6682332ad0725"], "id":1
+}' | jq
+```
+
+```json
+{
+ "jsonrpc": "2.0",
+ "result": true,
+ "id": 1
+}
+```
diff --git a/reference/json-rpc/gas.md b/reference/json-rpc/gas.md
new file mode 100644
index 000000000..8991fe1e6
--- /dev/null
+++ b/reference/json-rpc/gas.md
@@ -0,0 +1,168 @@
+# Gas
+
+## GasEstimateFeeCap
+
+GasEstimateFeeCap estimates gas fee cap
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## GasEstimateGasLimit
+
+GasEstimateGasLimit estimates gas used by the message and returns it. It fails if message fails to execute.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `9`
+
+## GasEstimateGasPremium
+
+GasEstimateGasPremium estimates what gas price should be used for a message to have high likelihood of inclusion in `nblocksincl` epochs.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 42,
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## GasEstimateMessageGas
+
+GasEstimateMessageGas estimates gas values for unset message gas fields
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ {
+ "MaxFee": "0"
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
diff --git a/reference/json-rpc/i.md b/reference/json-rpc/i.md
new file mode 100644
index 000000000..c47ae047d
--- /dev/null
+++ b/reference/json-rpc/i.md
@@ -0,0 +1,9 @@
+# I
+
+## ID
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
diff --git a/reference/json-rpc/log.md b/reference/json-rpc/log.md
new file mode 100644
index 000000000..bef67edd2
--- /dev/null
+++ b/reference/json-rpc/log.md
@@ -0,0 +1,61 @@
+# Log
+
+## LogAlerts
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "Type": {
+ "System": "string value",
+ "Subsystem": "string value"
+ },
+ "Active": true,
+ "LastActive": {
+ "Type": "string value",
+ "Message": "json raw message",
+ "Time": "0001-01-01T00:00:00Z"
+ },
+ "LastResolved": {
+ "Type": "string value",
+ "Message": "json raw message",
+ "Time": "0001-01-01T00:00:00Z"
+ }
+ }
+]
+```
+
+## LogList
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ "string value"
+]
+```
+
+## LogSetLevel
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "string value",
+ "string value"
+]
+```
diff --git a/reference/json-rpc/market.md b/reference/json-rpc/market.md
new file mode 100644
index 000000000..e6df4b887
--- /dev/null
+++ b/reference/json-rpc/market.md
@@ -0,0 +1,114 @@
+# Market
+
+## MarketAddBalance
+
+MarketAddBalance adds funds to the market actor
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MarketGetReserved
+
+MarketGetReserved gets the amount of funds that are currently reserved for the address
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `"0"`
+
+## MarketReleaseFunds
+
+MarketReleaseFunds releases funds reserved by MarketReserveFunds
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "0"
+]
+```
+
+Response: `{}`
+
+## MarketReserveFunds
+
+MarketReserveFunds reserves funds for a deal
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MarketWithdraw
+
+MarketWithdraw withdraws unlocked funds from the market actor
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
diff --git a/reference/json-rpc/miner.md b/reference/json-rpc/miner.md
new file mode 100644
index 000000000..2a6f5eea1
--- /dev/null
+++ b/reference/json-rpc/miner.md
@@ -0,0 +1,195 @@
+# Miner
+
+## MinerCreateBlock
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Miner": "f01234",
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "Eproof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconValues": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "Messages": [
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ],
+ "Epoch": 10101,
+ "Timestamp": 42,
+ "WinningPoStProof": [
+ {
+ "PoStProof": 8,
+ "ProofBytes": "Ynl0ZSBhcnJheQ=="
+ }
+ ]
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Header": {
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "WinPoStProof": [
+ {
+ "PoStProof": 8,
+ "ProofBytes": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+ },
+ "BlsMessages": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "SecpkMessages": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ]
+}
+```
+
+## MinerGetBaseInfo
+
+There are not yet any comments for this method.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "MinerPower": "0",
+ "NetworkPower": "0",
+ "Sectors": [
+ {
+ "SealProof": 8,
+ "SectorNumber": 9,
+ "SectorKey": null,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ }
+ ],
+ "WorkerKey": "f01234",
+ "SectorSize": 34359738368,
+ "PrevBeaconEntry": {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "EligibleForMining": true
+}
+```
diff --git a/reference/json-rpc/mpool.md b/reference/json-rpc/mpool.md
new file mode 100644
index 000000000..ba5827e65
--- /dev/null
+++ b/reference/json-rpc/mpool.md
@@ -0,0 +1,565 @@
+---
+description: >-
+ The Mpool methods are for interacting with the message pool. The message pool
+ manages all incoming and outgoing ‘messages’ going over the network.
+---
+
+# Mpool
+
+## MpoolBatchPush
+
+MpoolBatchPush batch pushes a signed message to mempool.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+## MpoolBatchPushMessage
+
+MpoolBatchPushMessage batch pushes an unsigned message to mempool.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ],
+ {
+ "MaxFee": "0"
+ }
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+## MpoolBatchPushUntrusted
+
+MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+## MpoolClear
+
+MpoolClear clears pending messages from the mpool
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+## MpoolGetConfig
+
+MpoolGetConfig returns (a copy of) the current mpool config
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "PriorityAddrs": [
+ "f01234"
+ ],
+ "SizeLimitHigh": 123,
+ "SizeLimitLow": 123,
+ "ReplaceByFeeRatio": 12.3,
+ "PruneCooldown": 60000000000,
+ "GasLimitOverestimation": 12.3
+}
+```
+
+## MpoolGetNonce
+
+MpoolGetNonce gets next nonce for the specified sender. Note that this method may not be atomic. Use MpoolPushMessage instead.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `42`
+
+## MpoolPending
+
+MpoolPending returns pending mempool messages.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+## MpoolPush
+
+MpoolPush pushes a signed message to mempool.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MpoolPushMessage
+
+MpoolPushMessage atomically assigns a nonce, signs, and pushes a message to mempool. maxFee is only used when GasFeeCap/GasPremium fields aren’t specified
+
+When maxFee is set to 0, MpoolPushMessage will guess appropriate fee based on current chain conditions
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ {
+ "MaxFee": "0"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+## MpoolPushUntrusted
+
+MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MpoolSelect
+
+MpoolSelect returns a list of pending messages for inclusion in the next block
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 12.3
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+## MpoolSetConfig
+
+MpoolSetConfig sets the mpool config to (a copy of) the supplied config
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "PriorityAddrs": [
+ "f01234"
+ ],
+ "SizeLimitHigh": 123,
+ "SizeLimitLow": 123,
+ "ReplaceByFeeRatio": 12.3,
+ "PruneCooldown": 60000000000,
+ "GasLimitOverestimation": 12.3
+ }
+]
+```
+
+Response: `{}`
+
+## MpoolSub
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Type": 0,
+ "Message": {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+}
+```
diff --git a/reference/json-rpc/msig.md b/reference/json-rpc/msig.md
new file mode 100644
index 000000000..b1a23f634
--- /dev/null
+++ b/reference/json-rpc/msig.md
@@ -0,0 +1,482 @@
+---
+description: >-
+ The Msig methods are used to interact with multisig wallets on the Filecoin
+ network.
+---
+
+# Msig
+
+## MsigAddApprove
+
+MsigAddApprove approves a previously proposed AddSigner message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigAddCancel
+
+MsigAddCancel cancels a previously proposed AddSigner message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ true
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigAddPropose
+
+MsigAddPropose proposes adding a signer in the multisig.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigApprove
+
+MsigApprove approves a previously-proposed multisig message by transaction ID.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 42,
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigApproveTxnHash
+
+MsigApproveTxnHash approves a previously-proposed multisig message, specified using both transaction ID and a hash of the parameters used in the proposal. This method of approval can be used to ensure you only approve exactly the transaction you think you are.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ "0",
+ "f01234",
+ 42,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigCancel
+
+MsigCancel cancels a previously-proposed multisig message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 42,
+ "f01234",
+ "0",
+ "f01234",
+ 42,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigCreate
+
+MsigCreate creates a multisig wallet.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ 42,
+ [
+ "f01234"
+ ],
+ 10101,
+ "0",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigGetAvailableBalance
+
+MsigGetAvailableBalance returns the portion of a multisig’s balance that can be withdrawn or spent
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## MsigGetPending
+
+MsigGetPending returns pending transactions for the given multisig wallet. Once pending transactions are fully approved, they will no longer appear here.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "ID": 9,
+ "To": "f01234",
+ "Value": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "Approved": [
+ "f01234"
+ ]
+ }
+]
+```
+
+## MsigGetVested
+
+MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## MsigGetVestingSchedule
+
+MsigGetVestingSchedule returns the vesting details of a given multisig.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "InitialBalance": "0",
+ "StartEpoch": 10101,
+ "UnlockDuration": 10101
+}
+```
+
+## MsigPropose
+
+MsigPropose proposes a multisig message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "0",
+ "f01234",
+ 42,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigRemoveSigner
+
+MsigRemoveSigner proposes the removal of a signer from the multisig. It accepts the multisig to make the change on, the proposer address to send the message from, the address to be removed, and a boolean indicating whether or not the signing threshold should be lowered by one along with the address removal.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigSwapApprove
+
+MsigSwapApprove approves a previously proposed SwapSigner message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigSwapCancel
+
+MsigSwapCancel cancels a previously proposed SwapSigner message.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## MsigSwapPropose
+
+MsigSwapPropose proposes swapping 2 signers in the multisig.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "f01234",
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
diff --git a/reference/json-rpc/net.md b/reference/json-rpc/net.md
new file mode 100644
index 000000000..115c233d1
--- /dev/null
+++ b/reference/json-rpc/net.md
@@ -0,0 +1,531 @@
+# Net
+
+## NetAddrsListen
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": [
+ "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior"
+ ]
+}
+```
+
+## NetAgentVersion
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `"string value"`
+
+## NetAutoNatStatus
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Reachability": 1,
+ "PublicAddr": "string value"
+}
+```
+
+## NetBandwidthStats
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "TotalIn": 9,
+ "TotalOut": 9,
+ "RateIn": 12.3,
+ "RateOut": 12.3
+}
+```
+
+## NetBandwidthStatsByPeer
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": {
+ "TotalIn": 174000,
+ "TotalOut": 12500,
+ "RateIn": 100,
+ "RateOut": 50
+ }
+}
+```
+
+## NetBandwidthStatsByProtocol
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "/fil/hello/1.0.0": {
+ "TotalIn": 174000,
+ "TotalOut": 12500,
+ "RateIn": 100,
+ "RateOut": 50
+ }
+}
+```
+
+## NetBlockAdd
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Peers": [
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ ],
+ "IPAddrs": [
+ "string value"
+ ],
+ "IPSubnets": [
+ "string value"
+ ]
+ }
+]
+```
+
+Response: `{}`
+
+## NetBlockList
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Peers": [
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ ],
+ "IPAddrs": [
+ "string value"
+ ],
+ "IPSubnets": [
+ "string value"
+ ]
+}
+```
+
+## NetBlockRemove
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Peers": [
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ ],
+ "IPAddrs": [
+ "string value"
+ ],
+ "IPSubnets": [
+ "string value"
+ ]
+ }
+]
+```
+
+Response: `{}`
+
+## NetConnect
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": [
+ "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior"
+ ]
+ }
+]
+```
+
+Response: `{}`
+
+## NetConnectedness
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `1`
+
+## NetDisconnect
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `{}`
+
+## NetFindPeer
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response:
+
+
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": [
+ "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior"
+ ]
+}
+```
+
+## NetLimit
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Memory": 123,
+ "Streams": 3,
+ "StreamsInbound": 1,
+ "StreamsOutbound": 2,
+ "Conns": 4,
+ "ConnsInbound": 3,
+ "ConnsOutbound": 4,
+ "FD": 5
+}
+```
+
+## NetPeerInfo
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response:
+
+
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Agent": "string value",
+ "Addrs": [
+ "string value"
+ ],
+ "Protocols": [
+ "string value"
+ ],
+ "ConnMgrMeta": {
+ "FirstSeen": "0001-01-01T00:00:00Z",
+ "Value": 123,
+ "Tags": {
+ "name": 42
+ },
+ "Conns": {
+ "name": "2021-03-08T22:52:18Z"
+ }
+ }
+}
+```
+
+## NetPeers
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": [
+ "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior"
+ ]
+ }
+]
+```
+
+## NetPing
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `60000000000`
+
+## NetProtectAdd
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ [
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ ]
+]
+```
+
+Response: `{}`
+
+## NetProtectList
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+## NetProtectRemove
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ [
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ ]
+]
+```
+
+Response: `{}`
+
+## NetPubsubScores
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ {
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Score": {
+ "Score": 12.3,
+ "Topics": {
+ "/blocks": {
+ "TimeInMesh": 60000000000,
+ "FirstMessageDeliveries": 122,
+ "MeshMessageDeliveries": 1234,
+ "InvalidMessageDeliveries": 3
+ }
+ },
+ "AppSpecificScore": 12.3,
+ "IPColocationFactor": 12.3,
+ "BehaviourPenalty": 12.3
+ }
+ }
+]
+```
+
+## NetSetLimit
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ "string value",
+ {
+ "Memory": 123,
+ "Streams": 3,
+ "StreamsInbound": 1,
+ "StreamsOutbound": 2,
+ "Conns": 4,
+ "ConnsInbound": 3,
+ "ConnsOutbound": 4,
+ "FD": 5
+ }
+]
+```
+
+Response: `{}`
+
+## NetStat
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response:
+
+
+```json
+{
+ "System": {
+ "NumStreamsInbound": 123,
+ "NumStreamsOutbound": 123,
+ "NumConnsInbound": 123,
+ "NumConnsOutbound": 123,
+ "NumFD": 123,
+ "Memory": 9
+ },
+ "Transient": {
+ "NumStreamsInbound": 123,
+ "NumStreamsOutbound": 123,
+ "NumConnsInbound": 123,
+ "NumConnsOutbound": 123,
+ "NumFD": 123,
+ "Memory": 9
+ },
+ "Services": {
+ "abc": {
+ "NumStreamsInbound": 1,
+ "NumStreamsOutbound": 2,
+ "NumConnsInbound": 3,
+ "NumConnsOutbound": 4,
+ "NumFD": 5,
+ "Memory": 123
+ }
+ },
+ "Protocols": {
+ "abc": {
+ "NumStreamsInbound": 1,
+ "NumStreamsOutbound": 2,
+ "NumConnsInbound": 3,
+ "NumConnsOutbound": 4,
+ "NumFD": 5,
+ "Memory": 123
+ }
+ },
+ "Peers": {
+ "abc": {
+ "NumStreamsInbound": 1,
+ "NumStreamsOutbound": 2,
+ "NumConnsInbound": 3,
+ "NumConnsOutbound": 4,
+ "NumFD": 5,
+ "Memory": 123
+ }
+ }
+}
+```
diff --git a/reference/json-rpc/paych.md b/reference/json-rpc/paych.md
new file mode 100644
index 000000000..0af9f687a
--- /dev/null
+++ b/reference/json-rpc/paych.md
@@ -0,0 +1,545 @@
+---
+description: The Paych methods are for interacting with and managing payment channels
+---
+
+# Paych
+
+## PaychAllocateLane
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `42`
+
+## PaychAvailableFunds
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Channel": "\u003cempty\u003e",
+ "From": "f01234",
+ "To": "f01234",
+ "ConfirmedAmt": "0",
+ "PendingAmt": "0",
+ "NonReservedAmt": "0",
+ "PendingAvailableAmt": "0",
+ "PendingWaitSentinel": null,
+ "QueuedAmt": "0",
+ "VoucherReedeemedAmt": "0"
+}
+```
+
+## PaychAvailableFundsByFromTo
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Channel": "\u003cempty\u003e",
+ "From": "f01234",
+ "To": "f01234",
+ "ConfirmedAmt": "0",
+ "PendingAmt": "0",
+ "NonReservedAmt": "0",
+ "PendingAvailableAmt": "0",
+ "PendingWaitSentinel": null,
+ "QueuedAmt": "0",
+ "VoucherReedeemedAmt": "0"
+}
+```
+
+## PaychCollect
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## PaychGet
+
+There are not yet any comments for this method.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Channel": "f01234",
+ "WaitSentinel": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+}
+```
+
+## PaychGetWaitReady
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `"f01234"`
+
+## PaychList
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+## PaychNewPayment
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "f01234",
+ [
+ {
+ "Amount": "0",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "MinSettle": 10101,
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Channel": "f01234",
+ "WaitSentinel": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Vouchers": [
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ }
+ ]
+}
+```
+
+## PaychSettle
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## PaychStatus
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "ControlAddr": "f01234",
+ "Direction": 1
+}
+```
+
+## PaychVoucherAdd
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ },
+ "Ynl0ZSBhcnJheQ==",
+ "0"
+]
+```
+
+Response: `"0"`
+
+## PaychVoucherCheckSpendable
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ },
+ "Ynl0ZSBhcnJheQ==",
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response: `true`
+
+## PaychVoucherCheckValid
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ }
+]
+```
+
+Response: `{}`
+
+## PaychVoucherCreate
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "0",
+ 42
+]
+```
+
+Response:
+
+
+```json
+{
+ "Voucher": {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ },
+ "Shortfall": "0"
+}
+```
+
+## PaychVoucherList
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ }
+]
+```
+
+## PaychVoucherSubmit
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "ChannelAddr": "f01234",
+ "TimeLockMin": 10101,
+ "TimeLockMax": 10101,
+ "SecretPreimage": "Ynl0ZSBhcnJheQ==",
+ "Extra": {
+ "Actor": "f01234",
+ "Method": 1,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Lane": 42,
+ "Nonce": 42,
+ "Amount": "0",
+ "MinSettleHeight": 10101,
+ "Merges": [
+ {
+ "Lane": 42,
+ "Nonce": 42
+ }
+ ],
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ },
+ "Ynl0ZSBhcnJheQ==",
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+
+
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
diff --git a/reference/json-rpc/state.md b/reference/json-rpc/state.md
new file mode 100644
index 000000000..71e48102b
--- /dev/null
+++ b/reference/json-rpc/state.md
@@ -0,0 +1,2273 @@
+---
+description: >-
+ The State methods are used to query, inspect, and interact with chain state.
+ Most methods take a TipSetKey as a parameter. The state looked up is the
+ parent state of the tipset.
+---
+
+# State
+
+A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.
+
+## StateAccountKey
+
+StateAccountKey returns the public key address of the given ID address
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"f01234"`
+
+## StateAllMinerFaults
+
+StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "Miner": "f01234",
+ "Epoch": 10101
+ }
+]
+```
+
+## StateCall
+
+StateCall runs the given message and returns its result without any persisted changes.
+
+StateCall applies the message to the tipset’s parent state. The message is not applied on-top-of the messages in the passed-in tipset.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "MsgCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "GasCost": {
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "GasUsed": "0",
+ "BaseFeeBurn": "0",
+ "OverEstimationBurn": "0",
+ "MinerPenalty": "0",
+ "MinerTip": "0",
+ "Refund": "0",
+ "TotalCost": "0"
+ },
+ "ExecutionTrace": {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": [
+ {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": null
+ }
+ ]
+ },
+ "Error": "string value",
+ "Duration": 60000000000
+}
+```
+
+## StateChangedActors
+
+StateChangedActors returns all the actors whose states change between the two given state CIDs. TODO: Should this take tipset keys instead?
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "t01236": {
+ "Code": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Head": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Nonce": 42,
+ "Balance": "0"
+ }
+}
+```
+
+## StateCirculatingSupply
+
+StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. This is not used anywhere in the protocol itself, and is only for external consumption.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateCompute
+
+StateCompute is a flexible command that applies the given messages on the given tipset. The messages are run as though the VM were at the provided height.
+
+When called, StateCompute will:
+
+* Load the provided tipset, or use the current chain head if not provided
+* Compute the tipset state of the provided tipset on top of the parent state
+ * (note that this step runs before vmheight is applied to the execution)
+ * Execute state upgrade if any were scheduled at the epoch, or in null blocks preceding the tipset
+ * Call the cron actor on null blocks preceding the tipset
+ * For each block in the tipset
+ * Apply messages in blocks in the specified order
+ * Award block reward by calling the reward actor
+ * Call the cron actor for the current epoch
+* If the specified vmheight is higher than the current epoch, apply any needed state upgrades to the state
+* Apply the specified messages to the state
+
+The vmheight parameter sets VM execution epoch, and can be used to simulate message execution in different network versions. If the specified vmheight epoch is higher than the epoch of the specified tipset, any state upgrades until the vmheight will be executed on the state before applying messages specified by the user.
+
+Note that the initial tipset state computation is not affected by the vmheight parameter - only the messages in the `apply` set are
+
+If the caller wants to simply compute the state, vmheight should be set to the epoch of the specified tipset.
+
+Messages in the `apply` parameter must have the correct nonces, and gas values set.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 10101,
+ [
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Trace": [
+ {
+ "MsgCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "GasCost": {
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "GasUsed": "0",
+ "BaseFeeBurn": "0",
+ "OverEstimationBurn": "0",
+ "MinerPenalty": "0",
+ "MinerTip": "0",
+ "Refund": "0",
+ "TotalCost": "0"
+ },
+ "ExecutionTrace": {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": [
+ {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": null
+ }
+ ]
+ },
+ "Error": "string value",
+ "Duration": 60000000000
+ }
+ ]
+}
+```
+
+## StateDealProviderCollateralBounds
+
+StateDealProviderCollateralBounds returns the min and max collateral a storage provider can issue. It takes the deal size and verified status as parameters.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 1032,
+ true,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Min": "0",
+ "Max": "0"
+}
+```
+
+## StateDecodeParams
+
+StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 1,
+ "Ynl0ZSBhcnJheQ==",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
+## StateGetActor
+
+StateGetActor returns the indicated actor’s nonce and balance.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Code": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Head": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Nonce": 42,
+ "Balance": "0"
+}
+```
+
+## StateGetRandomnessFromBeacon
+
+StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ==",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Bw=="`
+
+## StateGetRandomnessFromTickets
+
+StateGetRandomnessFromTickets is used to sample the chain for randomness.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ==",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Bw=="`
+
+## StateGetReceipt
+
+StateGetReceipt returns the message receipt for the given message or for a matching gas-repriced replacing message
+
+NOTE: If the requested message was replaced, this method will return the receipt for the replacing message - if the caller needs the receipt for exactly the requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message is matching the requested CID
+
+DEPRECATED: Use StateSearchMsg, this method won’t be supported in v1 API
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+}
+```
+
+## StateListActors
+
+StateListActors returns the addresses of every actor in the state
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+## StateListMessages
+
+StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "To": "f01234",
+ "From": "f01234"
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 10101
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+## StateListMiners
+
+StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+## StateLookupID
+
+StateLookupID retrieves the ID address of the given address
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"f01234"`
+
+## StateMarketBalance
+
+StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Escrow": "0",
+ "Locked": "0"
+}
+```
+
+## StateMarketDeals
+
+StateMarketDeals returns information about every deal in the Storage Market
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "t026363": {
+ "Proposal": {
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceSize": 1032,
+ "VerifiedDeal": true,
+ "Client": "f01234",
+ "Provider": "f01234",
+ "Label": "string value",
+ "StartEpoch": 10101,
+ "EndEpoch": 10101,
+ "StoragePricePerEpoch": "0",
+ "ProviderCollateral": "0",
+ "ClientCollateral": "0"
+ },
+ "State": {
+ "SectorStartEpoch": 10101,
+ "LastUpdatedEpoch": 10101,
+ "SlashEpoch": 10101
+ }
+ }
+}
+```
+
+## StateMarketParticipants
+
+StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "t026363": {
+ "Escrow": "0",
+ "Locked": "0"
+ }
+}
+```
+
+## StateMarketStorageDeal
+
+StateMarketStorageDeal returns information about the indicated deal
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ 5432,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Proposal": {
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceSize": 1032,
+ "VerifiedDeal": true,
+ "Client": "f01234",
+ "Provider": "f01234",
+ "Label": "string value",
+ "StartEpoch": 10101,
+ "EndEpoch": 10101,
+ "StoragePricePerEpoch": "0",
+ "ProviderCollateral": "0",
+ "ClientCollateral": "0"
+ },
+ "State": {
+ "SectorStartEpoch": 10101,
+ "LastUpdatedEpoch": 10101,
+ "SlashEpoch": 10101
+ }
+}
+```
+
+## StateMinerActiveSectors
+
+StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "SectorNumber": 9,
+ "SealProof": 8,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "DealIDs": [
+ 5432
+ ],
+ "Activation": 10101,
+ "Expiration": 10101,
+ "DealWeight": "0",
+ "VerifiedDealWeight": "0",
+ "InitialPledge": "0",
+ "ExpectedDayReward": "0",
+ "ExpectedStoragePledge": "0",
+ "SectorKeyCID": null
+ }
+]
+```
+
+## StateMinerAvailableBalance
+
+StateMinerAvailableBalance returns the portion of a miner’s balance that can be withdrawn or spent
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateMinerDeadlines
+
+StateMinerDeadlines returns all the proving deadlines for the given miner
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "PostSubmissions": [
+ 5,
+ 1
+ ],
+ "DisputableProofCount": 42
+ }
+]
+```
+
+## StateMinerFaults
+
+StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ 5,
+ 1
+]
+```
+
+## StateMinerInfo
+
+StateMinerInfo returns info about the indicated miner
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Owner": "f01234",
+ "Worker": "f01234",
+ "NewWorker": "f01234",
+ "ControlAddresses": [
+ "f01234"
+ ],
+ "WorkerChangeEpoch": 10101,
+ "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Multiaddrs": [
+ "Ynl0ZSBhcnJheQ=="
+ ],
+ "WindowPoStProofType": 8,
+ "SectorSize": 34359738368,
+ "WindowPoStPartitionSectors": 42,
+ "ConsensusFaultElapsed": 10101
+}
+```
+
+## StateMinerInitialPledgeCollateral
+
+StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner’s sector
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "SealProof": 8,
+ "SectorNumber": 9,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "SealRandEpoch": 10101,
+ "DealIDs": [
+ 5432
+ ],
+ "Expiration": 10101,
+ "ReplaceCapacity": true,
+ "ReplaceSectorDeadline": 42,
+ "ReplaceSectorPartition": 42,
+ "ReplaceSectorNumber": 9
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateMinerPartitions
+
+StateMinerPartitions returns all partitions in the specified deadline
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 42,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "AllSectors": [
+ 5,
+ 1
+ ],
+ "FaultySectors": [
+ 5,
+ 1
+ ],
+ "RecoveringSectors": [
+ 5,
+ 1
+ ],
+ "LiveSectors": [
+ 5,
+ 1
+ ],
+ "ActiveSectors": [
+ 5,
+ 1
+ ]
+ }
+]
+```
+
+## StateMinerPower
+
+StateMinerPower returns the power of the indicated miner
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "MinerPower": {
+ "RawBytePower": "0",
+ "QualityAdjPower": "0"
+ },
+ "TotalPower": {
+ "RawBytePower": "0",
+ "QualityAdjPower": "0"
+ },
+ "HasMinPower": true
+}
+```
+
+## StateMinerPreCommitDepositForPower
+
+StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner’s sector
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "SealProof": 8,
+ "SectorNumber": 9,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "SealRandEpoch": 10101,
+ "DealIDs": [
+ 5432
+ ],
+ "Expiration": 10101,
+ "ReplaceCapacity": true,
+ "ReplaceSectorDeadline": 42,
+ "ReplaceSectorPartition": 42,
+ "ReplaceSectorNumber": 9
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateMinerProvingDeadline
+
+StateMinerProvingDeadline calculates the deadline at some epoch for a proving period and returns the deadline-related calculations.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "CurrentEpoch": 10101,
+ "PeriodStart": 10101,
+ "Index": 42,
+ "Open": 10101,
+ "Close": 10101,
+ "Challenge": 10101,
+ "FaultCutoff": 10101,
+ "WPoStPeriodDeadlines": 42,
+ "WPoStProvingPeriod": 10101,
+ "WPoStChallengeWindow": 10101,
+ "WPoStChallengeLookback": 10101,
+ "FaultDeclarationCutoff": 10101
+}
+```
+
+## StateMinerRecoveries
+
+StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ 5,
+ 1
+]
+```
+
+## StateMinerSectorAllocated
+
+StateMinerSectorAllocated checks if a sector is allocated
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `true`
+
+## StateMinerSectorCount
+
+StateMinerSectorCount returns the number of sectors in a miner’s sector set and proving set
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Live": 42,
+ "Active": 42,
+ "Faulty": 42
+}
+```
+
+## StateMinerSectors
+
+StateMinerSectors returns info about the given miner’s sectors. If the filter bitfield is nil, all sectors are included.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ 0
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+[
+ {
+ "SectorNumber": 9,
+ "SealProof": 8,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "DealIDs": [
+ 5432
+ ],
+ "Activation": 10101,
+ "Expiration": 10101,
+ "DealWeight": "0",
+ "VerifiedDealWeight": "0",
+ "InitialPledge": "0",
+ "ExpectedDayReward": "0",
+ "ExpectedStoragePledge": "0",
+ "SectorKeyCID": null
+ }
+]
+```
+
+## StateNetworkName
+
+StateNetworkName returns the name of the network the node is synced to
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"lotus"`
+
+## StateNetworkVersion
+
+StateNetworkVersion returns the network version at the given tipset
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `15`
+
+## StateReadState
+
+StateReadState returns the indicated actor’s state.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Balance": "0",
+ "Code": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": {}
+}
+```
+
+## StateReplay
+
+StateReplay replays a given message, assuming it was included in a block in the specified tipset.
+
+If a tipset key is provided, and a replacing message is found on chain, the method will return an error saying that the message wasn’t found
+
+If no tipset key is provided, the appropriate tipset is looked up, and if the message was gas-repriced, the on-chain message will be replayed - in that case the returned InvocResult.MsgCid will not match the Cid param
+
+If the caller wants to ensure that exactly the requested message was executed, they MUST check that InvocResult.MsgCid is equal to the provided Cid. Without this check both the requested and original message may appear as successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and different signature, but with all other parameters matching (source/destination, nonce, params, etc.)
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "MsgCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "GasCost": {
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "GasUsed": "0",
+ "BaseFeeBurn": "0",
+ "OverEstimationBurn": "0",
+ "MinerPenalty": "0",
+ "MinerTip": "0",
+ "Refund": "0",
+ "TotalCost": "0"
+ },
+ "ExecutionTrace": {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": [
+ {
+ "Msg": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "MsgRct": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "Error": "string value",
+ "Duration": 60000000000,
+ "GasCharges": [
+ {
+ "Name": "string value",
+ "loc": [
+ {
+ "File": "string value",
+ "Line": 123,
+ "Function": "string value"
+ }
+ ],
+ "tg": 9,
+ "cg": 9,
+ "sg": 9,
+ "vtg": 9,
+ "vcg": 9,
+ "vsg": 9,
+ "tt": 60000000000,
+ "ex": {}
+ }
+ ],
+ "Subcalls": null
+ }
+ ]
+ },
+ "Error": "string value",
+ "Duration": 60000000000
+}
+```
+
+## StateSearchMsg
+
+StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
+
+NOTE: If a replacing message is found on chain, this method will return a MsgLookup for the replacing message - the MsgLookup.Message will be a different CID than the one provided in the ‘cid’ param, MsgLookup.Receipt will contain the result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed, they MUST check that MsgLookup.Message is equal to the provided ‘cid’. Without this check both the requested and original message may appear as successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and different signature, but with all other parameters matching (source/destination, nonce, params, etc.)
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Receipt": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "ReturnDec": {},
+ "TipSet": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Height": 10101
+}
+```
+
+## StateSearchMsgLimited
+
+StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
+
+NOTE: If a replacing message is found on chain, this method will return a MsgLookup for the replacing message - the MsgLookup.Message will be a different CID than the one provided in the ‘cid’ param, MsgLookup.Receipt will contain the result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed, they MUST check that MsgLookup.Message is equal to the provided ‘cid’. Without this check both the requested and original message may appear as successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and different signature, but with all other parameters matching (source/destination, nonce, params, etc.)
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ 10101
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Receipt": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "ReturnDec": {},
+ "TipSet": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Height": 10101
+}
+```
+
+## StateSectorExpiration
+
+StateSectorExpiration returns epoch at which given sector will expire
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "OnTime": 10101,
+ "Early": 10101
+}
+```
+
+## StateSectorGetInfo
+
+StateSectorGetInfo returns the on-chain info for the specified miner’s sector. Returns null in case the sector info isn’t found NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate expiration epoch
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "SectorNumber": 9,
+ "SealProof": 8,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "DealIDs": [
+ 5432
+ ],
+ "Activation": 10101,
+ "Expiration": 10101,
+ "DealWeight": "0",
+ "VerifiedDealWeight": "0",
+ "InitialPledge": "0",
+ "ExpectedDayReward": "0",
+ "ExpectedStoragePledge": "0",
+ "SectorKeyCID": null
+}
+```
+
+## StateSectorPartition
+
+StateSectorPartition finds deadline/partition with the specified sector
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Deadline": 42,
+ "Partition": 42
+}
+```
+
+## StateSectorPreCommitInfo
+
+StateSectorPreCommitInfo returns the PreCommit info for the specified miner’s sector
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "Info": {
+ "SealProof": 8,
+ "SectorNumber": 9,
+ "SealedCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "SealRandEpoch": 10101,
+ "DealIDs": [
+ 5432
+ ],
+ "Expiration": 10101,
+ "ReplaceCapacity": true,
+ "ReplaceSectorDeadline": 42,
+ "ReplaceSectorPartition": 42,
+ "ReplaceSectorNumber": 9
+ },
+ "PreCommitDeposit": "0",
+ "PreCommitEpoch": 10101,
+ "DealWeight": "0",
+ "VerifiedDealWeight": "0"
+}
+```
+
+## StateVMCirculatingSupplyInternal
+
+StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. This is the value reported by the runtime interface to actors code.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+
+
+```json
+{
+ "FilVested": "0",
+ "FilMined": "0",
+ "FilBurnt": "0",
+ "FilLocked": "0",
+ "FilCirculating": "0",
+ "FilReserveDisbursed": "0"
+}
+```
+
+## StateVerifiedClientStatus
+
+StateVerifiedClientStatus returns the data cap for the given address. Returns nil if there is no entry in the data cap table for the address.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateVerifiedRegistryRootKey
+
+StateVerifiedRegistryRootKey returns the address of the Verified Registry’s root key
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"f01234"`
+
+## StateVerifierStatus
+
+StateVerifierStatus returns the data cap for the given address. Returns nil if there is no entry in the data cap table for the address.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## StateWaitMsg
+
+StateWaitMsg looks back in the chain for a message. If not found, it blocks until the message arrives on chain, and gets to the indicated confidence depth.
+
+NOTE: If a replacing message is found on chain, this method will return a MsgLookup for the replacing message - the MsgLookup.Message will be a different CID than the one provided in the ‘cid’ param, MsgLookup.Receipt will contain the result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed, they MUST check that MsgLookup.Message is equal to the provided ‘cid’. Without this check both the requested and original message may appear as successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and different signature, but with all other parameters matching (source/destination, nonce, params, etc.)
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ 42
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Receipt": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "ReturnDec": {},
+ "TipSet": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Height": 10101
+}
+```
+
+## StateWaitMsgLimited
+
+StateWaitMsgLimited looks back up to limit epochs in the chain for a message. If not found, it blocks until the message arrives on chain, and gets to the indicated confidence depth.
+
+NOTE: If a replacing message is found on chain, this method will return a MsgLookup for the replacing message - the MsgLookup.Message will be a different CID than the one provided in the ‘cid’ param, MsgLookup.Receipt will contain the result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed, they MUST check that MsgLookup.Message is equal to the provided ‘cid’. Without this check both the requested and original message may appear as successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and different signature, but with all other parameters matching (source/destination, nonce, params, etc.)
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ 42,
+ 10101
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Receipt": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "ReturnDec": {},
+ "TipSet": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Height": 10101
+}
+```
diff --git a/reference/json-rpc/sync.md b/reference/json-rpc/sync.md
new file mode 100644
index 000000000..2562a038e
--- /dev/null
+++ b/reference/json-rpc/sync.md
@@ -0,0 +1,299 @@
+---
+description: >-
+ The Sync method group contains methods for interacting with and observing the
+ lotus sync service.
+---
+
+# Sync
+
+## SyncCheckBad
+
+SyncCheckBad checks if a block was marked as bad, and if it was, returns the reason.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `"string value"`
+
+## SyncCheckpoint
+
+SyncCheckpoint marks a block as checkpointed, meaning that it won’t ever fork away from it.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
+## SyncIncomingBlocks
+
+SyncIncomingBlocks returns a channel streaming incoming, potentially not yet synced block headers.
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "WinPoStProof": [
+ {
+ "PoStProof": 8,
+ "ProofBytes": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+}
+```
+
+## SyncMarkBad
+
+SyncMarkBad marks a block as bad, meaning that it won’t ever be synced. Use with extreme caution.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
+## SyncState
+
+SyncState returns the current status of the lotus sync system.
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+
+
+```json
+{
+ "ActiveSyncs": [
+ {
+ "WorkerID": 42,
+ "Base": {
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+ },
+ "Target": {
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+ },
+ "Stage": 1,
+ "Height": 10101,
+ "Start": "0001-01-01T00:00:00Z",
+ "End": "0001-01-01T00:00:00Z",
+ "Message": "string value"
+ }
+ ],
+ "VMApplied": 42
+}
+```
+
+## SyncSubmitBlock
+
+SyncSubmitBlock can be used to submit a newly created block to the network through this node.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ {
+ "Header": {
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": [
+ {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "WinPoStProof": [
+ {
+ "PoStProof": 8,
+ "ProofBytes": "Ynl0ZSBhcnJheQ=="
+ }
+ ],
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+ },
+ "BlsMessages": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ],
+ "SecpkMessages": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ ]
+ }
+]
+```
+
+Response: `{}`
+
+## SyncUnmarkAllBad
+
+SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+## SyncUnmarkBad
+
+SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
+## SyncValidateTipset
+
+SyncValidateTipset indicates whether the provided tipset is valid or not
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `true`
diff --git a/reference/json-rpc/wallet.md b/reference/json-rpc/wallet.md
new file mode 100644
index 000000000..0d5ae9c18
--- /dev/null
+++ b/reference/json-rpc/wallet.md
@@ -0,0 +1,283 @@
+# Wallet
+
+## WalletBalance
+
+WalletBalance returns the balance of the given address at the current head of the chain.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `"0"`
+
+## WalletDefaultAddress
+
+WalletDefaultAddress returns the address marked as default in the wallet.
+
+Perms: write
+
+Inputs: `null`
+
+Response: `"f01234"`
+
+## WalletDelete
+
+WalletDelete deletes an address from the wallet.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `{}`
+
+## WalletExport
+
+WalletExport returns the private key of an address in the wallet.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response:
+
+
+```json
+{
+ "Type": "bls",
+ "PrivateKey": "Ynl0ZSBhcnJheQ=="
+}
+```
+
+## WalletHas
+
+WalletHas indicates whether the given address is in the wallet.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `true`
+
+## WalletImport
+
+WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
+
+Perms: admin
+
+Inputs:
+
+
+```json
+[
+ {
+ "Type": "bls",
+ "PrivateKey": "Ynl0ZSBhcnJheQ=="
+ }
+]
+```
+
+Response: `"f01234"`
+
+## WalletList
+
+WalletList lists all the addresses in the wallet.
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+## WalletNew
+
+WalletNew creates a new address in the wallet with the given sigType. Available key types: bls, secp256k1, secp256k1-ledger Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "bls"
+]
+```
+
+Response: `"f01234"`
+
+## WalletSetDefault
+
+WalletSetDefault marks the given address as the default one.
+
+Perms: write
+
+Inputs:
+
+
+```json
+[
+ "f01234"
+]
+```
+
+Response: `{}`
+
+## WalletSign
+
+WalletSign signs the given bytes using the given address.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+
+
+```json
+{
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+}
+```
+
+## WalletSignMessage
+
+WalletSignMessage signs the given message using the given address.
+
+Perms: sign
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+Response:
+
+
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+## WalletValidateAddress
+
+WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "string value"
+]
+```
+
+Response: `"f01234"`
+
+## WalletVerify
+
+WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. The address does not have to be in the wallet.
+
+Perms: read
+
+Inputs:
+
+
+```json
+[
+ "f01234",
+ "Ynl0ZSBhcnJheQ==",
+ {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+]
+```
+
+Response: `true`
diff --git a/reference/reference.md b/reference/reference.md
new file mode 100644
index 000000000..02577ad8e
--- /dev/null
+++ b/reference/reference.md
@@ -0,0 +1,2 @@
+# Reference
+
diff --git a/smart-contracts/advanced/README.md b/smart-contracts/advanced/README.md
new file mode 100644
index 000000000..ab262d0fa
--- /dev/null
+++ b/smart-contracts/advanced/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+  This section contains information on the advanced features of the Filecoin
+  network, specifically focused on smart contract developers.
+---
+
+# Advanced
+
diff --git a/smart-contracts/advanced/cross-chain-bridges.md b/smart-contracts/advanced/cross-chain-bridges.md
new file mode 100644
index 000000000..252d3cfae
--- /dev/null
+++ b/smart-contracts/advanced/cross-chain-bridges.md
@@ -0,0 +1,62 @@
+---
+description: >-
+ Blockchain networks are often isolated and cannot interact with each other
+ directly, so cross-chain bridges serve as a link between them and bring
+ interoperability between different blockchains.
+---
+
+# Cross-chain bridges
+
+Cross-chain bridges have many use cases, such as enabling decentralized exchanges to support the trading of assets from multiple blockchain networks or allowing users to access decentralized applications (dApps) on different networks. They are also helpful for interoperability between separate blockchain networks, essential for the growth and adoption of blockchain technology.
+
+## Available bridges
+
+Regarding bridges, security is the top concern. The Filecoin team is focused on integrating with notary-based bridges that have a solid security model. Eventually, trustless light-client-based bridging solutions will be available.
+
+### [Axelar](https://axelar.network/)
+
+Axelar enables both token bridge and general message passing and is well-connected to major EVM chains & Cosmos ecosystem.
+
+Initially, the bridge will support the following assets: wFIL, wETH, wBTC, USDC, and USDT.
+
+#### **Axelar smart contracts**
+
+Currently, Axelar supports Filecoin Mainnet.
+
+| Name | Mainnet |
+| ------- | -------------------------------------------- |
+| wFIL | `0x60E1773636CF5E4A227d9AC24F20fEca034ee25A` |
+| axlUSDC | `0xEB466342C4d449BC9f53A865D5Cb90586f405215` |
+| axlUSDT | `0x7f5373AE26c3E8FfC4c77b7255DF7eC1A9aF52a6` |
+| axlWBTC | `0x1a35EE4640b0A3B87705B0A4B45D227Ba60Ca2ad` |
+| axlWETH | `0xb829b68f57CC546dA7E5806A929e53bE32a4625D` |
+
+#### **Further Axelar resources**
+
+* [Axelar docs for developers](https://docs.axelar.dev/dev/intro)
+* [Axelar with Squid Router](https://app.squidrouter.com/)
+* [Getting Started with Axelar on FVM Tutorial](https://www.youtube.com/watch?v=L7cw5FhxW4s)
+
+### [Celer](https://cbridge.celer.network/1/314)
+
+Celer is a blockchain interoperability protocol enabling a one-click user experience accessing tokens, DeFi, GameFi, NFTs, governance, and privacy solutions across multiple chains. Celer has been successfully supporting Filecoin on both assets bridging using its cBridge and messaging passing through Celer Inter-chain Messaging (Celer IM).
+
+Initially, the bridge will support the following assets: wFIL, wETH, wBTC, USDC, and USDT.
+
+### **Celer smart contracts**
+
+Celer’s cBridge supports both Filecoin Mainnet and Calibration testnet.
+
+| Name | Mainnet | Calibration |
+| ---------- | -------------------------------------------- | -------------------------------------------- |
+| wFIL | `0x60E1773636CF5E4A227d9AC24F20fEca034ee25A` | |
+| USDC | `0x2421db204968A367CC2C866CD057fA754Cb84EdF` | `0xf5C6825015280CdfD0b56903F9F8B5A2233476F5` |
+| USDT | `0x422849b355039bc58f2780cc4854919fc9cfaf94` | `0x7d43AABC515C356145049227CeE54B608342c0ad` |
+| WBTC | `0x592786e04c47844aa3b343b19ef2f50a255a477f` | `0x265B25e22bcd7f10a5bD6E6410F10537Cc7567e8` |
+| WETH | `0x522b61755b5ff8176b2931da7bf1a5f9414eb710` | `0x5471ea8f739dd37E9B81Be9c5c77754D8AA953E4` |
+| MessageBus | `0x6ff2130fbdd2837b0c92d7f56f6c017642d84f66` | `0xd5818D039A702DdccfD11A900A40B3dc6DA03CEc` |
+
+### **Further Celer resources**
+
+* [cBridge docs](https://cbridge-docs.celer.network/)
+* [Celer IM Docs](https://im-docs.celer.network/developer/celer-im-overview)
diff --git a/smart-contracts/advanced/oracles.md b/smart-contracts/advanced/oracles.md
new file mode 100644
index 000000000..cd75847c1
--- /dev/null
+++ b/smart-contracts/advanced/oracles.md
@@ -0,0 +1,49 @@
+---
+description: >-
+ Oracles act as a bridge between the Filecoin network and external data
+ sources. Secure oracles allow smart contracts on the FVM to access and use
+ external data sources.
+---
+
+# Oracles
+
+In the Filecoin network, on-chain data and the state of smart contracts are isolated from external data sources. They cannot access real-world information without breaking the deterministic attributes of the network. Since smart contracts cannot access information outside the Filecoin network, oracles are used as trusted entities to provide external data to the network.
+
+Oracles are an essential component of many blockchain applications, as they enable the blockchain to interact with the real world and provide more functionality to blockchain-based systems. Oracles can retrieve data from external sources, verify the data, and submit it to the blockchain for use by smart contracts and decentralized applications (dapps).
+
+Oracles enable builders to integrate the following features into their projects:
+
+* **Price feeds**: DeFi protocols like cross-chain lending rely on oracles for various token or token pair prices.
+* **Cross-chain storage deal verification**: enable applications running on any blockchains to use the Filecoin decentralized storage and allow them to verify deal status and proofs.
+* **Perpetual storage**: enable automated deal renewal and repair with the oracle providing deal status off-chain.
+
+## Available oracles
+
+There are several oracle-protocols built upon the FVM. Builders can integrate these oracles into their applications today.
+
+### [Tellor](https://tellor.io/)
+
+Tellor is an _optimistic_ oracle. Builders should not accept instant price quotes and should wait a few minutes before locking in details.
+
+Tellor supports a price feed oracle and a data oracle for the Filecoin network. The data oracle can provide Filecoin-specific data, such as the reputation of storage providers, which helps lending protocols determine interest rates for SPs.
+
+**Tellor smart contracts**
+
+Tellor’s smart contracts are live on the Filecoin Mainnet and Calibration testnet.
+
+| Name | Address | Mainnet | Calibration |
+| ---------------- | -------------------------------------------- | ------- | ----------- |
+| Bridged TRB | `0x045CE60839d108B43dF9e703d4b25402a6a28a0d` | ✔️ | |
+| Playground/TRB | `0x15e6Cc0D69A162151Cadfba035aa10b82b12b970` | | ✔️ |
+| Oracle | `0xb2CB696fE5244fB9004877e58dcB680cB86Ba444` | ✔️ | ✔️ |
+| Governance | `0xb55bB55f7D8b4F26Bd18198088C96488D95cab39` | ✔️ | ✔️ |
+| Autopay | `0x60cBf3991F05a0671250e673Aa166e9D1A0C662E` | ✔️ | ✔️ |
+| TellorFlex | `0xb2CB696fE5244fB9004877e58dcB680cB86Ba444` | ✔️ | ✔️ |
+| QueryDataStorage | `0xf44166ca8bdB612268a4D401e4c5147968E5a190` | ✔️ | ✔️ |
+| Multisig | `0x34Fae97547E990ef0E05e05286c51E4645bf1A85` | ✔️ | ✔️ |
+
+#### **Further Tellor resources**
+
+* [Tellor docs](https://docs.tellor.io/)
+* [Filecoin Storage Insurance Contract](https://github.com/tellor-io/filecoin-query-insurance-impl/tree/main)
+* [Getting Tellor Data for any use case](https://www.youtube.com/watch?v=AQIDqTLguyI) - FVM Dataverse Hackathon
diff --git a/smart-contracts/advanced/wrapped-fil.md b/smart-contracts/advanced/wrapped-fil.md
new file mode 100644
index 000000000..5615051ed
--- /dev/null
+++ b/smart-contracts/advanced/wrapped-fil.md
@@ -0,0 +1,90 @@
+---
+description: >-
+ Wrapped FIL (wFIL) is the canonical wrapper token of the native Filecoin (FIL)
+ token. Wrapped FIL features a 1-to-1 ratio pegged to FIL.
+---
+
+# Wrapped FIL
+
+Wrapped FIL (wFIL) is a wrapper token based on the ERC-20 token standard for the native Filecoin token (FIL). It allows FIL to be bridged and used in Ethereum-compatible decentralized applications (dapps) hosted on other blockchains, such as decentralized exchanges (DEXs), lending platforms, and other places where FIL is not natively supported.
+
+Wrapped FIL operates like any other ERC20-wrapped native blockchain token: a user deposits FIL into the wFIL contract and gets back an equal number of wFIL tokens. When users want to convert their wFIL back to FIL, they can burn the wFIL and unlock the same amount of FIL that was initially locked in the wFIL contract.
+
+Overall, wFIL provides additional liquidity and interoperability for FIL tokens, making the Filecoin network more accessible for a broader range of decentralized finance (DeFi) use cases across multiple blockchains.
+
+{% hint style="danger" %}
+When wrapping and unwrapping FIL ensure you are using the correct wFIL contract address on Filecoin.
+{% endhint %}
+
+### Wrapped FIL contract addresses
+
+Only use the following addresses when wrapping and unwrapping FIL:
+
+* Mainnet: `0x60E1773636CF5E4A227d9AC24F20fEca034ee25A`
+* Calibration testnet: `0xaC26a4Ab9cF2A8c5DBaB6fb4351ec0F4b07356c4`
+
+### Wrapping and unwrapping process
+
+There are a couple of options for users to wrap and unwrap FIL using a web browser:
+
+* [Glif](https://www.glif.io/)
+* [wfil.io](https://wfil.io/)
+
+To wrap FIL into wFIL, follow these steps:
+
+1. **Obtain FIL**: Ensure you have FIL in your MetaMask wallet before wrapping it.
+2. **Connect your wallet**: You will need to connect your wallet to a platform that supports wFIL wrapping, such as [Glif](https://www.glif.io/) or [wfil.io](https://wfil.io/).
+3. **Wrap your FIL**: After you’ve connected your wallet, you can wrap your FIL by following the platform’s instructions. Generally, you’ll need to select the amount of FIL you want to wrap and confirm the transaction on MetaMask. The platform will then mint an equivalent amount of wFIL and deposit it into your wallet:\
+
+
+ 
+4. **Use wFIL**: Once you have wFIL in your wallet, you can use it on various DeFi products that support token swapping or bridging wFIL to other blockchains.\
+
+
+ 
+
+To unwrap FIL and receive FIL back to your wallet, users can directly go to supported platforms such as [Glif](https://www.glif.io/) or [wfil.io](https://wfil.io/) to unwrap FIL following the platform’s instructions. Once the network confirms the unwrap transaction, FIL tokens are transferred back to your wallet address.
+
+### Programmatic interaction
+
+Developers integrating wFIL into applications or protocols can wrap and unwrap FIL programmatically. The wFIL smart contract is deployed on the Filecoin Mainnet and Calibration testnet.
+
+#### Wrap FIL
+
+To add wrapping features to a project, developers must interact with the wFIL smart contract that manages the wFIL minting and burning process. The source code of the wFIL smart contract is in the [wFIL GitHub repo](https://github.com/glifio/wfil).
+
+Do not directly send FIL to the wFIL contract address. Also, ensure you do not send FIL using the `METHOD_SEND` method. Always use the `InvokeEVM` method.
+
+There are two options to wrap FIL:
+
+1. Call the `deposit()` method in the wFIL contract and attach the amount of FIL tokens users want to wrap. This process will mint wFIL 1:1 and transfer to the `msg.sender` address.
+
+```solidity
+function deposit() public payable virtual {
+ _mint(msg.sender, msg.value);
+ emit Deposit(msg.sender, msg.value);
+}
+```
+
+2. Since the wFIL implements the receive function, you can send FIL to the wFIL contract using the `InvokeEVM` method to wrap FIL. This method will trigger the `deposit` function, minting the caller with wFIL 1:1.
+
+```solidity
+receive() external payable virtual {
+ deposit();
+}
+```
+
+#### Unwrap FIL
+
+To unwrap wFIL into FIL, developers need to call the `withdraw` method in the wFIL contract and specify how many wFIL you would like to unwrap. The `withdraw` method looks like this:
+
+```solidity
+function withdraw(uint _amount) public virtual {
+ _burn(msg.sender, _amount);
+ emit Withdrawal(msg.sender, _amount);
+ payable(msg.sender).sendValue(_amount);
+}
+```
+
+This process will burn the amount of wFIL from the caller’s balance and transfer the unwrapped FIL 1:1 back to the caller’s address.
diff --git a/smart-contracts/developing-contracts/README.md b/smart-contracts/developing-contracts/README.md
new file mode 100644
index 000000000..1f7f022e0
--- /dev/null
+++ b/smart-contracts/developing-contracts/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section contains a wealth of information specifically about building
+ smart contracts on the Filecoin network.
+---
+
+# Developing contracts
+
diff --git a/smart-contracts/developing-contracts/best-practices.md b/smart-contracts/developing-contracts/best-practices.md
new file mode 100644
index 000000000..33669ce2f
--- /dev/null
+++ b/smart-contracts/developing-contracts/best-practices.md
@@ -0,0 +1,98 @@
+---
+description: >-
+ This page describes best practices for testing, developing and deploying smart
+ contracts on the Filecoin network.
+---
+
+# Best practices
+
+## Transactions
+
+Best practices for transactions are described below.
+
+### Consistently generating transaction receipts
+
+Since receipts in Filecoin are generated in the next tipset, depending on when a transaction is submitted to the mempool, the receipt may take between 30 and 90 seconds to return. To consistently return transaction receipts when deploying a transaction or awaiting confirmation, change the default transaction receipt timeout (60000 ms or 1 minute for many toolchains) to 90 seconds or more. An example that sets `timeout` to `180000` (3 minutes) for an Open Zeppelin upgradeable proxy is as follows:
+
+```js
+const deployment = await upgrades.deployProxy(contract, preparedArguments, {
+ timeout: 180000
+});
+```
+
+### Unstuck a message from the mempool
+
+When users send messages to the Filecoin network, those messages will first land in the mempool. Once a node receives your message, it will verify the gas fee and signature and then process the transaction. Depending on network traffic and other factors, this process may take some time.
+
+If the Filecoin network still needs to confirm a message, it’s because it has yet to be processed and is sitting in the mempool. Several causes exist, such as network congestion, insufficient gas fees, or an invalid message signature.
+
+We recommend users resubmit the message with a higher gas fee or priority fee so those messages will not block the mempool and potentially impact the block-producing time. Gas fees on the network can fluctuate depending on network demand, so it’s always a good idea to monitor gas prices and adjust your fees accordingly to ensure your transaction is processed promptly.
+
+#### **Metamask**
+
+If you are building your project using MetaMask, it would be easier to educate the users to speed up a transaction by increasing the gas fee directly in MetaMask. Refer to the [official MetaMask documentation](https://support.metamask.io/hc/en-us/articles/360015489251-How-to-speed-up-or-cancel-a-pending-transaction) for more details.
+
+#### **Lotus**
+
+Developers using Lotus can [replace an existing message with an updated gas fee](https://lotus.filecoin.io/kb/update-msg-gas-fee/).
+
+#### **SDKs**
+
+Developers processing messages using SDKs, such as ethers.js or web3.js, must replace the message with higher gas fees by following these steps:
+
+1. Get the original message using its hash.
+2. Create a new message with the same `nonce`, `to`, and `value` fields as the original message.
+3. Set a higher `gasLimit` and `gasPrice` for this message.
+4. Sign and send the new message.
+
+## Futureproofing
+
+Developers should take the time to thoroughly read through the following summary of possible contract future-proofing updates, as failure to properly future proof smart contracts may result in incompatibility with future Filecoin releases.
+
+* **All contracts** must [accept both `DAG_CBOR (0x71)` and `CBOR (0x51)` in inputs and treat them identically, and use `CBOR (0x51)` in outputs](best-practices.md#accept-both-dag\_cbor-0x71-and-cbor-0x51-in-inputs-and-treat-them-identically).
+* If a contract uses the FRC42 hash of `GranularityExported`, it must be updated and redeployed.
+* If a contract sends funds to actors that are non-native, Ethereum, or EVM smart contract accounts, it [must use the `call_actor` precompile](best-practices.md#contracts-sending-funds-to-specific-actors).
+* If a contract is interacting with built-in actors, it must upgrade to the latest version of Filecoin Solidity library, currently `v0.8`.
+
+### All contracts
+
+All contracts must do the following:
+
+#### **Accept both `DAG_CBOR (0x71)` and `CBOR (0x51)` in inputs and treat them identically**
+
+Smart contracts should accept both `DAG_CBOR (0x71)` and `CBOR (0x51)` in inputs and treat them identically. Specifically:
+
+* Treat `DAG_CBOR` and `CBOR` as equivalent when returned from the `call_actor` precompile.
+* Treat `DAG_CBOR` and `CBOR` as equivalent when received as a parameter to `handle_filecoin_method`.
+
+#### **Use CBOR (0x51) in outputs**
+
+Smart contracts should use `CBOR (0x51)` in outputs. Specifically:
+
+* Always pass `CBOR` to the `call_actor` precompile. `DAG_CBOR` is currently forbidden.
+* Always return `CBOR` from `handle_filecoin_method`. `DAG_CBOR` is currently forbidden.
+
+### Contracts using `GranularityExported` hash
+
+The `GranularityExported` method in the Datacap actor was renamed to `Granularity`, so any contracts which use the FRC42 hash of `GranularityExported` (`953701584`) must update the hash to `3936767397` and redeploy.
+
+### Contracts sending funds to specific actors
+
+Any contracts sending funds to actors that are not native accounts (`f1` or `f3` addresses), Ethereum accounts, or EVM smart contracts must now use the `call_actor` precompile. **Solidity’s transfer function will no longer work as that will attempt to invoke the target actor as an EVM contract**.
+
+### Contracts interacting with built-in actors
+
+All contracts interacting with built-in actors must upgrade to the [latest version of Filecoin Solidity library, currently `v0.8`](https://github.com/filecoin-project/filecoin-solidity/tree/master/contracts/v0.8). The IPLD codec used in the `handle_filecoin_method` solidity entrypoint and the `call_actor` should now be `CBOR (0x51)`, not `DAG_CBOR (0x71)`, as previously used. The underlying encoding (i.e. the payload bytes) are the same, but the codec numbers are now different. `DAG_CBOR` support will be re-enabled in the future but the usage of the codec implies additional runtime guarantees that have not yet been implemented.
+
+## Contract Verification
+
+When deploying contracts to mainnet, it is important to verify your contracts to improve transparency, security and trustlessness of the network. The process of verifying your contract involves recompiling your contract’s source code to ensure that the produced bytecode matches the bytecode that is already live on the network since it was deployed.
+
+It is highly recommended for all FVM smart contracts to complete the verification process, soon after deployment.
+
+Developers can easily do so through the following block explorers:
+
+* [Filfox contract verifier](https://filfox.info/en/contract)
+* [Beryx contract verifier](https://beryx.zondax.ch/contract\_verifier)
+
+You can find this tutorial in the [FEVM ERC-20 Quickstart](../fundamentals/erc-20-quickstart.md).
diff --git a/smart-contracts/developing-contracts/call-built-in-actors.md b/smart-contracts/developing-contracts/call-built-in-actors.md
new file mode 100644
index 000000000..b74f6fbe8
--- /dev/null
+++ b/smart-contracts/developing-contracts/call-built-in-actors.md
@@ -0,0 +1,254 @@
+---
+description: >-
+ Filecoin built-in actors can be invoked in a smart contract using either the
+  Protocol API or the Zondax filecoin.sol library. This page provides
+ instructions on how to use each method.
+---
+
+# Call built-in actors
+
+{% hint style="info" %}
+For conceptual information on built-in actors, including their purposes, how they work and available types, see the [conceptual guide](../../reference/built-in-actors/)
+{% endhint %}
+
+Built-in actors can be invoked using the Protocol _JSON-RPC_ API or the Zondax _filecoin.sol_ API.
+
+## APIs compared
+
+The Protocol _JSON-RPC_ API:
+
+* Is maintained by Protocol Labs (PL).
+* Uses JSON-RPC, a standardized way to encode remote procedure calls in JSON that can be transported using HTTP or WebSockets.
+* Provides a language agnostic interface for Filecoin functionality.
+* Allows applications to access Filecoin functionality using HTTP or WebSockets calls to a Filecoin node, like the Lotus daemon.
+* Requires authentication for some API calls.
+* Serves as the foundation for language-specific libraries (some of which are maintained by organizations other than PL) such as [filecoin.js](https://filecoin-shipyard.github.io/filecoin.js/).
+
+The Zondax _filecoin.sol_ API:
+
+* Is maintained by [Zondax](https://docs.zondax.ch/).
+* Supports [_some but not all_ of the built-in actors and their methods](call-built-in-actors.md#available-actors-and-methods).
+
+## Protocol API
+
+Smart contracts can directly access built-in actors and methods using the Protocol API. Links to the reference guides for each of the available actor methods is listed below:
+
+* [Account actor](call-built-in-actors.md#account)
+* [Datacap](call-built-in-actors.md#datacap)
+* [Miner](call-built-in-actors.md#miner)
+* [Multisig](call-built-in-actors.md#multisig)
+* [Storage market actor](call-built-in-actors.md#storage-market)
+* [Storage power actor](call-built-in-actors.md#storage-power)
+* [Verified registry actor](call-built-in-actors.md#verified-registry)
+
+## Filecoin.sol
+
+Smart contracts can access built-in actor methods with the `filecoin.sol` library, a set of Solidity libraries that allow Solidity smart contracts to seamlessly call methods of Filecoin built-in actors. The `filecoin.sol` library supports cross-platform calls to real Filecoin built-in actors. This section contains information on the actors and methods available from `filecoin.sol`, along with installation instructions and working examples of smart contracts that call built-in actor methods.
+
+To invoke built-in actor methods using `filecoin.sol`, follow these steps:
+
+1. Review the [available actors and methods](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#available-actors-and-methods).
+2. [Import `filecoin.sol`](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#import-filecoinsol).
+3. [Call a built-in actor](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#call-a-built-in-actor).
+
+### Available actors and methods
+
+The majority of the Account, DataCap, Storage Market, Miner, Storage Owner and Verified Registry actor methods are supported and are listed below. **Cron, Payment Channel, Reward and System actor methods are currently not supported.**
+
+[procedure](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#import-filecoinsol)
+
+#### **Account**
+
+| Method | Supported? |
+| --------------------- | ---------- |
+| AuthenticateMessage | ✔️ |
+| Constructor | ✖️ |
+| PubkeyAddress | ✖️ |
+| UniversalReceiverHook | ✔️ |
+
+#### **DataCap**
+
+| Method | Supported? |
+| ----------------- | ---------- |
+| Allowance | ✔️ |
+| BalanceOf | ✔️ |
+| Burn | ✔️ |
+| BurnFrom | ✔️ |
+| Constructor | ✖️ |
+| DecreaseAllowance | ✔️ |
+| Destroy | ✖️ |
+| IncreaseAllowance | ✔️ |
+| Mint | ✖️ |
+| Name | ✔️ |
+| RevokeAllowance | ✔️ |
+| Symbol | ✔️ |
+| TotalSupply | ✔️ |
+| Transfer | ✔️ |
+| TransferFrom | ✔️ |
+
+#### **Miner**
+
+| Method | Supported? |
+| ------------------------- | ---------- |
+| ApplyRewards | ✖️ |
+| ChangeBeneficiary | ✔️ |
+| ChangeMultiaddrs | ✔️ |
+| ChangeOwnerAddress | ✔️ |
+| ChangePeerID | ✔️ |
+| ChangeWorkerAddress | ✔️ |
+| CheckSectorProven | ✖️ |
+| CompactPartitions | ✖️ |
+| CompactSectorNumbers | ✖️ |
+| ConfirmSectorProofsValid | ✖️ |
+| ConfirmUpdateWorkerKey | ✖️ |
+| Constructor | ✖️ |
+| ControlAddresses | ✖️ |
+| DeclareFaults | ✖️ |
+| DeclareFaultsRecovered | ✖️ |
+| DisputeWindowedPoSt | ✖️ |
+| ExtendSectorExpiration | ✖️ |
+| ExtendSectorExpiration2 | ✖️ |
+| GetAvailableBalance | ✔️ |
+| GetBeneficiary | ✔️ |
+| GetOwner | ✔️ |
+| GetSectorSize | ✔️ |
+| GetVestingFunds | ✔️ |
+| IsControllingAddress | ✔️ |
+| OnDeferredCronEvent | ✖️ |
+| PreCommitSector | ✖️ |
+| PreCommitSectorBatch | ✖️ |
+| PreCommitSectorBatch2 | ✖️ |
+| ProveCommitAggregate | ✖️ |
+| ProveCommitSector | ✖️ |
+| ProveReplicaUpdates | ✖️ |
+| ProveReplicaUpdates2 | ✖️ |
+| Read fee debt | ✖️ |
+| Read initial pledge total | ✖️ |
+| Read peer ID, multiaddr | ✔️ |
+| Read pre-commit deposit | ✖️ |
+| RepayDebt | ✔️ |
+| ReportConsensusFault | ✖️ |
+| SubmitWindowedPoSt | ✖️ |
+| TerminateSectors | ✖️ |
+| WithdrawBalance | ✔️ |
+
+#### **Multisig**
+
+| Method | Supported? |
+| --------------------------- | ---------- |
+| AddSigner | ✔️ |
+| Approve | ✔️ |
+| Cancel | ✔️ |
+| ChangeNumApprovalsThreshold | ✖️ |
+| Constructor | ✖️ |
+| List signers and threshold | ✖️ |
+| LockBalance | ✔️ |
+| Propose | ✔️ |
+| RemoveSigner | ✔️ |
+| SwapSigner | ✔️ |
+| UniversalReceiverHook | ✔️ |
+
+#### **Storage market**
+
+| Method | Supported? |
+| ------------------------- | ---------- |
+| ActivateDeals | ✖️ |
+| AddBalance | ✔️ |
+| ComputeDataCommitment | ✖️ |
+| Constructor | ✖️ |
+| CronTick | ✖️ |
+| GetBalance | ✔️ |
+| GetDealActivation | ✔️ |
+| GetDealClient | ✔️ |
+| GetDealClientCollateral | ✔️ |
+| GetDealDataCommitment | ✔️ |
+| GetDealEpochPrice | ✔️ |
+| GetDealLabel | ✔️ |
+| GetDealProvider | ✔️ |
+| GetDealProviderCollateral | ✔️ |
+| GetDealTerm | ✔️ |
+| GetDealVerified | ✔️ |
+| OnMinerSectorsTerminate | ✖️ |
+| PublishStorageDeals | ✔️ |
+| VerifyDealsForActivation | ✖️ |
+| WithdrawBalance | ✔️ |
+
+#### **Storage power**
+
+| Method | Supported? |
+| ---------------------------------------- | ---------- |
+| Compute pledge collateral for new sector | ✖️ |
+| Constructor | ✖️ |
+| CreateMiner | ✔️ |
+| CurrentTotalPower | ✖️ |
+| EnrollCronEvent | ✖️ |
+| Get miner count, consensus count | ✔️ |
+| Get miner’s QA power | ✖️ |
+| Get network bytes committed? | ✖️ |
+| Get network epoch pledge collateral | ✖️ |
+| Get network epoch QA power | ✖️ |
+| Get network total pledge collateral? | ✖️ |
+| MinerRawPower | ✔️ |
+| NetworkRawPower | ✔️ |
+| OnEpochTickEnd | ✖️ |
+| SubmitPoRepForBulkVerify | ✖️ |
+| UpdateClaimedPower | ✖️ |
+| UpdatePledgeTotal | ✖️ |
+
+#### **Verified registry**
+
+| Method | Supported? |
+| --------------------------- | ---------- |
+| AddVerifiedClient | ✔️ |
+| AddVerifier | ✖️ |
+| ClaimAllocations | ✖️ |
+| Constructor | ✖️ |
+| ExtendClaimTerms | ✔️ |
+| GetClaims | ✔️ |
+| List claims | ✖️ |
+| List/check verifiers | ✖️ |
+| List/get allocations | ✖️ |
+| RemoveExpiredAllocations | ✔️ |
+| RemoveExpiredClaims | ✔️ |
+| RemoveVerifiedClientDataCap | ✖️ |
+| RemoveVerifier | ✖️ |
+| UniversalReceiverHook | ✔️ |
+
+### Import filecoin.sol
+
+The `filecoin.sol` library is embeddable into your smart contract, which means it does not need to be present on chain first. Instead, you can just import the library and call the available methods. The `filecoin.sol` library can be [added via `npm`](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#import-filecoinsol-with-npm) or [manually imported](https://docs.filecoin.io/smart-contracts/developing-contracts/call-built-in-actors/#import-filecoinsol-manually) into your contract. The `npm`-based import is simpler, and is recommended.
+
+#### **Import filecoin.sol with npm**
+
+1. Install [yarn](https://yarnpkg.com/) if you don’t have it installed.
+2. Install `filecoin.sol`:
+
+```
+yarn add @zondax/filecoin.sol
+```
+
+#### **Import filecoin.sol manually**
+
+1. Navigate to your smart contract project folder:
+
+```
+cd my-project
+```
+
+2. Create a folder named `libs`:
+
+```
+mkdir libs
+```
+
+3. Move into the `libs` directory:
+
+```
+cd libs
+```
+
+4. Copy the Zondax contracts with the methods you wish to call from [the contracts folder](https://github.com/filecoin-project/filecoin-solidity/tree/master/contracts/v0.8) into `libs`.
+
+### Call a built-in actor
+
+Once you’ve either imported particular contracts manually or simply installed `filecoin.sol` using npm, create a callable method to access the built-in actor methods the way you normally would in a Solidity smart contract. Working examples of smart contracts that call built-in actor methods are available in the [reference guide](call-built-in-actors.md#filecoin.sol).
diff --git a/smart-contracts/developing-contracts/client-contract-tutorial.md b/smart-contracts/developing-contracts/client-contract-tutorial.md
new file mode 100644
index 000000000..77c5aeca4
--- /dev/null
+++ b/smart-contracts/developing-contracts/client-contract-tutorial.md
@@ -0,0 +1,131 @@
+---
+description: >-
+ The Client Contract allows developers to create storage deals programmatically
+ via smart contracts.
+---
+
+# Client contract tutorial
+
+In this tutorial, we will cover the background of creating storage deals via smart contracts and how to create storage deals with smart contracts on the FEVM (Ethereum Virtual Machine on top of the Filecoin blockchain). Before continuing, you will need the following software preinstalled on your computer:
+
+* Git
+* NodeJS
+* Yarn or NPM (Yarn is used in this tutorial)
+* A code editor such as VS Code
+* A wallet with [Calibration testnet FIL](../../networks/calibration/)
+
+{% embed url="https://www.youtube.com/watch?v=27EV3gQGY9k" %}
+
+## Workflows
+
+Before we get started, let’s quickly cover the two workflows for creating storage deals.
+
+### Regular deal creation workflow
+
+Filecoin is a blockchain tailor-made for processing storage deals. Before the Filecoin virtual machine was implemented into the network, storage deals could only be created using the following workflow:
+
+
+
+This process requires a lot of different actions to be taken by both the client and the storage provider. But with the advent of the Filecoin Virtual Machine, smart contracts can be deployed on top of the Filecoin blockchain, allowing developers to access and create Filecoin storage deals from within smart contracts. This reduces the number of actions clients and storage providers have to take to generate storage deals.
+
+### Smart contract deal creation workflow
+
+To create storage deals, smart contracts need a specific Solidity event called `DealProposalCreate`. Storage Providers running [Boost software](https://boost.filecoin.io/) can then listen for this event and, if the deal is acceptable to them, pick up the deal automatically. The workflow can be seen below.
+
+
+
+## Steps
+
+Let’s now run through how to create storage deals via smart contracts.
+
+### Setup
+
+First, let’s grab the kit and set up the development environment.
+
+1. Clone the Filecoin virtual machine deal-making kit, including all submodules:
+
+```sh
+git clone --recurse-submodules https://github.com/filecoin-project/fvm-starter-kit-deal-making.git
+```
+
+2. This will copy the FVM deal-making kit into your current directory and initiate the `go-generate-car` submodule.
+3. Move into the `fvm-starter-kit-deal-making` directory and grab all the dependencies using `yarn`:
+
+```sh
+cd fvm-starter-kit-deal-making
+yarn
+```
+
+4. Now that all the packages are downloaded, we will need to create a `.env` file with your private key. This is so the hardhat kit knows what wallet to use for transactions. Open up the repo in your code editor of choice and find the file titled `.env.example`. Rename the file to `.env`.
+
+```sh
+mv .env.example .env
+```
+
+5. Replace the example private key with your actual private key. If you are using Metamask, follow [this tutorial to get your private key](https://support.metamask.io/hc/en-us/articles/360015289632-How-to-export-an-account-s-private-key). Remember to take precautions to never share your private key with anyone or check it into Git! The `.gitignore` of the hardhat kit is already set to ignore `.env` files.
+6. Deploy the contracts with `hardhat`:
+
+```sh
+yarn hardhat deploy
+```
+
+7. This should compile and deploy all the contracts, including the client contract, which is the one we will be interacting with. Copy and take note of the address of the deployed contract for later.
+
+### Preparing a file for storage
+
+Before storing a file with a storage provider, it needs to be prepared by turning it into a `.car` file, and the metadata must be recorded. To do this the Hardhat kit has a tool [which can do this for you](https://github.com/filecoin-project/fevm-hardhat-kit/tree/main/tools). However, to keep things nice and simple, we’re going to use the [FVM Data Depot website](https://data.lighthouse.storage/). This website will automatically convert files to the `.car` format, output all the necessary metadata, and act as an HTTP retrieval point for the storage providers.
+
+1. Go to the [FVM Data Depot website](https://data.lighthouse.storage/) and create an account.
+2. Click **Upload File** and select a file you wish to upload.
+3. Select the **File ID** of the file to read all the relevant metadata. Make a note of the:
+
+ * Piece CID
+ * Payload CID
+ * Car size
+ * Piece size
+ * URL
+
+ We’ll use this information in the next step when invoking the `MakeDealProposal` method.
+
+### Invoke the MakeDealProposal method
+
+Now that we have the `.car` file prepared in the data depot, we can invoke the MakeDealProposal method on the smart contract we deployed earlier. To do this, we will run the `make-deal-proposal` task in Hardhat. There are quite a few parameters to include in this call:
+
+* `contract`: the address of your deployed `ClientContract.sol`
+* `piece-cid`: gathered from the previous step.
+* `piece-size`: gathered from the previous step.
+* `car-size`: gathered from the previous step.
+* `start-epoch`: The block number you want the deal to begin on. It should be a block in the future. You can find the current block number on [FilFox Calibration](https://calibration.filfox.info/en).
+* `end-epoch`: The block number you want the deal to end on. It should be a block in the future and after the `Start-Epoch`. You can find the current block number on [FilFox Calibration](https://calibration.filfox.info/en).
+* `location-ref`: The location of where the storage provider can find the .car file. This is the `URL` from the previous step.
+
+When calling the `make-deal-proposal` task in Hardhat, your command will look something like this:
+
+```shell
+yarn hardhat make-deal-proposal \
+ --contract 0x0219eB1740C315fe5e20612D7E13AE2A883dB3f4 \
+ --piece-cid baga6ea4seaqn4eomxfk3ttog7lnvlvedu7nia377w4gotw2pm746k6kq7gwe6ga \
+ --piece-size 2097152 \
+ --verified-deal false \
+ --car-size 1439368 \
+ --label baga6ea4seaqn4eomxfk3ttog7lnvlvedu7nia377w4gotw2pm746k6kq7gwe6ga \
+ --start-epoch 180000 \
+ --end-epoch 700000 \
+ --storage-price-per-epoch 0 \
+ --provider-collateral 0 \
+ --client-collateral 0 \
+ --extra-params-version 1 \
+  --location-ref "https://bafybeidguwwno5ohjss7g4l6ygvyy3dzxxrkkgtxqkobnnxnu62aw4ipxa.ipfs.w3s.link/ipfs/bafybeidguwwno5ohjss7g4l6ygvyy3dzxxrkkgtxqkobnnxnu62aw4ipxa/baga6ea4seaqn4eomxfk3ttog7lnvlvedu7nia377w4gotw2pm746k6kq7gwe6ga.car" \
+  --skip-ipni-announce true \
+  --remove-unsealed-copy true
+```
+
+Parameters such as the `collateral` and `price-per-epoch` are set to `0`. On mainnet, these would be determined by storage providers, but since this is on the Calibration testnet, the storage providers should pick up the jobs even with these parameters set to `0`.
+
+### Storage provider picks up the job
+
+Now if you’ve invoked the task with all the correct parameters, the method will execute on-chain and emit an event that Boost storage providers will be listening to. If the deal is well-formed and the parameters are acceptable, they will download the .car file, double-check to ensure the `piece-cid` and `piece-size` match the deal and publish your storage deal! This could take up to a day. Once the deal is published, you can find it on a [Calibration testnet block explorer](../../networks/calibration/explorers.md). The client in the deal should be the `t4` address of the smart contract we called `MakeDealProposal` on.
+
+## Conclusion
+
+During this tutorial, we have shown the significance of making deals using smart contracts and then walked through making a storage deal using the FVM deal-making kit and web3.storage. Developers can make use of this workflow to integrate decentralized storage on Filecoin with their smart contracts and decentralized applications.
diff --git a/smart-contracts/developing-contracts/filecoin.sol.md b/smart-contracts/developing-contracts/filecoin.sol.md
new file mode 100644
index 000000000..45e42cc96
--- /dev/null
+++ b/smart-contracts/developing-contracts/filecoin.sol.md
@@ -0,0 +1,109 @@
+---
+description: >-
+ External Solidity libraries can help developers create their applications
+ quicker by offloading some of the work to already existing smart contracts.
+---
+
+# Filecoin.sol
+
+The Filecoin Solidity library allows developers to:
+
+* Interact with Filecoin built-in actors.
+* Simplify the interaction with the Filecoin storage market, miner actors, the verified registry for FIL+ automation, and more.
+* Use Filecoin-specific data types such as `FilAddress`, `FilActorID`, `CIDs`, storage deals, and more.
+* Use OpenZeppelin-like utilities specific to Filecoin.
+* Perform CBOR serialization and deserialization for parameters and return data.
+
+In order to access exported Filecoin built-in actor methods in your smart contract, you will need to import Filecoin.sol in your Solidity project. As they are embeddable libraries, they don’t need to be present on-chain. You can just import the library you desire and call its methods.
+
+Once the library is installed in your project, you can write Solidity code to call APIs from different built-in actors using Filecoin-specific data types or data conversions from the utility library.
+
+## Add to your contract
+
+Run the following command in your Solidity project, which is created using any smart contract development framework such as Hardhat, Truffle, or Foundry.
+
+```shell
+npm install @zondax/filecoin-solidity
+```
+
+## Usage
+
+Once installed, you can call built-in actors in the library after importing them into your smart contract.
+
+```solidity
+// contracts/MyNFT.sol
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.13;
+
+import "@zondax/filecoin-solidity/contracts/v0.8/MarketAPI.sol";
+import "@zondax/filecoin-solidity/contracts/v0.8/types/MarketTypes.sol";
+import "@zondax/filecoin-solidity/contracts/v0.8/types/CommonTypes.sol";
+
+contract MyFilecoinContract {
+ ...
+}
+```
+
+You can find the list of supported built-in actors and methods in [Zondax’s Filecoin.Sol documentation](https://docs.zondax.ch/fevm/filecoin-solidity/api/). You can access certain Filecoin-related features through these actors:
+
+* `AccountAPI.sol`: validates signatures from an address.
+* `MinerAPI.sol`: manages storage provider operation.
+* `MarketAPI.sol`: manages storage deals on Filecoin.
+* `PowerAPI.sol`: manages storage power for each storage provider and the whole network.
+* `DataCap.sol` and `VerifRegAPI.sol`: manages DataCap and verified clients for Filecoin Plus.
+
+Unlike OpenZeppelin contracts, you do not need to inherit contracts to use their features. With Filecoin.sol you just need to call the methods from those solidity contracts:
+
+```solidity
+CommonTypes.FilActorId minerID = CommonTypes.FilActorId.wrap(1130);
+CommonTypes.BigInt memory returnData = MinerAPI.getVestingFunds(minerID);
+```
+
+Filecoin.sol also offers several utility libraries to help developers to convert data types for different variables, including FILAddress, BigIntegers, ActorID, and CBOR. You can import those libraries from the `utils` folder:
+
+```solidity
+import "@zondax/filecoin-solidity/contracts/v0.8/utils/Actor.sol";
+import "@zondax/filecoin-solidity/contracts/v0.8/utils/BigInts.sol";
+import "@zondax/filecoin-solidity/contracts/v0.8/utils/FilAddresses.sol";
+```
+
+## Example
+
+We can write a simple Solidity smart contract to query basic information for a Filecoin storage deal:
+
+```solidity
+// SPDX-License-Identifier: UNLICENSED
+pragma solidity ^0.8.17;
+
+import "@zondax/filecoin-solidity/contracts/v0.8/MarketAPI.sol";
+import "@zondax/filecoin-solidity/contracts/v0.8/types/MarketTypes.sol";
+import "hardhat/console.sol";
+
+contract StorageDealQuery {
+
+ // Query the start epoch and duration(in epochs) of a deal proposal.
+ function get_deal_term(uint64 dealID) public returns (MarketTypes.GetDealTermReturn memory) {
+ return MarketAPI.getDealTerm(dealID);
+ }
+
+ // Query the storage provider who stores the data for this deal.
+ function get_deal_provider(uint64 dealID) public returns (uint64) {
+ return MarketAPI.getDealProvider(dealID);
+ }
+
+ // Query the collateral required from the storage provider for this deal proposal.
+ function get_deal_provider_collateral(uint64 dealID) public returns (CommonTypes.BigInt memory) {
+ return MarketAPI.getDealProviderCollateral(dealID);
+ }
+
+}
+```
+
+#### Next steps
+
+Check out these links to learn more about the Filecoin.sol library.
+
+* [Filecoin-Solidity GitHub](https://github.com/filecoin-project/filecoin-solidity)
+* [Zondax Documentation](https://docs.zondax.ch/fevm/filecoin-solidity/)
+* [Built-In Actor APIs](https://docs.zondax.ch/fevm/filecoin-solidity/api/)
+* [FEVM-Hardhat-Kit](https://github.com/filecoin-project/FEVM-Hardhat-Kit/)
diff --git a/smart-contracts/developing-contracts/foundry.md b/smart-contracts/developing-contracts/foundry.md
new file mode 100644
index 000000000..07c3f7085
--- /dev/null
+++ b/smart-contracts/developing-contracts/foundry.md
@@ -0,0 +1,66 @@
+---
+description: >-
+ Foundry is a fast toolkit for application development written in Rust equipped
+ with a testing framework, as well as utilities for interacting with smart
+ contracts and getting chain data.
+---
+
+# Foundry
+
+The template repository contains submodules and remappings for ds-test assertions for testing, solmate building blocks for contracts, and forge-std to layer on top of EVM cheat codes to improve UX.
+
+## Prerequisites
+
+You must have the following installed:
+
+* [Git](https://git-scm.com/)
+* [Yarn](https://yarnpkg.com/)
+
+You should also have an address on the Filecoin Calibration testnet. See the [MetaMask setup page](../../basics/assets/metamask-setup.md) for information on how to get an address. You also need test `tFIL` in your wallet.
+
+## Steps
+
+1. Clone the `xBalbinus/fevm-foundry-kit` repository and move into the `fevm-foundry-kit` directory:
+
+```
+git clone https://github.com/xBalbinus/fevm-foundry-kit/tree/main.git
+cd fevm-foundry-kit
+```
+
+2. Install the project dependencies with Yarn:
+
+```
+yarn install
+```
+
+3. Export your private key from MetaMask. See the [MetaMask documentation](https://support.metamask.io/hc/en-us/articles/360015289632-How-to-export-an-account-s-private-key) to find out how to export your private key.
+4. In your `.env.example`, create an environment variable called `PRIVATE_KEY` and paste in the private key from MetaMask. Also, do the same for the `HYPERSPACE_RPC_URL`. Then rename the file to `.env`:
+
+```
+PRIVATE_KEY=eed8e9d727a647f7302bab440d405ea87d36726e7d9f233ab3ff88036cfbce9c
+HYPERSPACE_RPC_URL=https://api.calibration.node.glif.io/rpc/v1
+```
+
+5. Inside the `src` folder is a contract called `SimpleCoin.sol`. Deploy this contract using Foundry:
+
+```shell
+forge build
+forge script script/SimpleCoin.s.sol:MyScript --rpc-url https://api.calibration.node.glif.io/rpc/v1 --broadcast --verify -vvvv
+
+# ...
+#
+# Script ran successfully.
+# Gas used: 234642
+```
+
+6. Alternatively, you can do the same using the `forge create` command:
+
+```
+forge build
+
+forge create --rpc-url https://api.calibration.node.glif.io/rpc/v1 --private-key $PRIVATE_KEY src/SimpleCoin.sol:SimpleCoin
+```
+
+7. You can now interact with your contract using the contract address given by Foundry.
+
+Done! For more information, see the [Foundry book](https://book.getfoundry.sh/).
diff --git a/smart-contracts/developing-contracts/get-test-tokens.md b/smart-contracts/developing-contracts/get-test-tokens.md
new file mode 100644
index 000000000..12b4772cf
--- /dev/null
+++ b/smart-contracts/developing-contracts/get-test-tokens.md
@@ -0,0 +1,103 @@
+---
+description: >-
+  Test funds are available to developers so that they can test their smart
+ contracts and applications within the confines of a test network. This page
+ covers how to get test funds.
+---
+
+# Get test tokens
+
+## Calibration testnet
+
+MetaMask is one of the easier ways to manage addresses on the Calibration testnet. MetaMask uses the `t4` [address type](../filecoin-evm-runtime/address-types.md), which allows developers to create and manage Solidity contracts easily. Follow the [MetaMask setup guide](../../basics/assets/metamask-setup.md) if you haven’t set up an address in your MetaMask wallet yet.
+
+1. In your browser, open MetaMask and copy your address to your clipboard:
+
+ 
+2. Go to [faucet.calibration.fildev.network](https://faucet.calibration.fildev.network/) and click **Faucet** from the menu.
+
+ 
+3. Paste your address into the address field, complete the **I am human** CAPTCHA, and then click **Send**:
+
+ 
+4. The faucet should give you a link to the transaction:
+
+ 
+5. The block explorer will show you the transaction history for your address. After a couple of minutes, you should see 5 `tFIL` transferred to your address.
+
+ 
+6. Open MetaMask to confirm that you received the `tFIL`:
+
+ 
+
+That’s all there is to it! Getting `tFIL` is easy!
+
+## Local testnet
+
+Before we begin, you must have a local testnet running. Follow the [Run a local network guide](https://docs.filecoin.io/networks/local-testnet/set-up/) if you haven’t got a local testnet set up yet.
+
+1. Change directory to where you created the `lotus` and `lotus-miner` binaries. If you followed the [Run a local network guide](https://docs.filecoin.io/networks/local-testnet/set-up/) these binaries will be in `~/lotus-devnet`:
+
+ ```
+ ```
+
+* ```shell
+ cd ~/lotus-devnet
+ ```
+* View the wallets available on this node with `lotus wallet list`:
+
+ ```
+ ```
+* ```shell
+ ./lotus wallet list
+ ```
+
+ ```plaintext
+ Address Balance Nonce Default
+ t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 0 FIL 0
+ t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq 49999999.999763880085417692 FIL 2 X
+ ```
+* Create the send request with `lotus send`, supplying the pre-mined `t3q4o...` address as the `--from` address, the new `t1snl...` address as the receiving address, and the amount of FIL we want to send:
+
+ ```
+ ```
+
+```shell
+./lotus send --from <from-address> <to-address> <amount>
+```
+
+For example:
+
+```
+```
+
+* ```shell
+ ./lotus send --from t3q4o7gkwe7p7xokhgws4rwntj7yqfhpj5pm6cqc7dycl7cwk4uvgh2odwdvge5re7ne5gcc6xluifss5uu5cq t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq 2000
+ ```
+
+ ```plaintext
+ bafy2bzaceaqzbgiazwvtpago6wpkxl42puxfkvwv5cwjpime2irqatamji2bq
+ ```
+* Check the balance of your new `t1snl...` address with `lotus wallet balance`:
+
+ ```
+ ```
+
+```shell
+./lotus wallet balance
+```
+
+For example:
+
+```
+```
+
+1. ```shell
+ ./lotus wallet balance t1snly7vh4mjtjznwze56ihrdhzfwvbajywwmrenq
+ ```
+
+ ```plaintext
+ 2000 FIL
+ ```
+
+If you want to manage your local testnet tokens in MetaMask you will need to create a `t4` address. You can create a `t4` address using `lotus wallet new delegated`. Once you have a `t4` address you can [connect MetaMask to your local testnet](https://docs.filecoin.io/basics/assets/metamask-setup/) to see the new balance within the MetaMask extension.
diff --git a/smart-contracts/developing-contracts/hardhat.md b/smart-contracts/developing-contracts/hardhat.md
new file mode 100644
index 000000000..1273359e9
--- /dev/null
+++ b/smart-contracts/developing-contracts/hardhat.md
@@ -0,0 +1,112 @@
+---
+description: >-
+ Hardhat is an open-source development environment designed to provide
+ developers with a flexible and extensible framework for building, testing, and
+ deploying smart contracts.
+---
+
+# Hardhat
+
+While originally created for the Ethereum blockchain, the Filecoin Ethereum Virtual Machine runtime (FEVM) allows Hardhat to be used to develop and deploy smart contracts on the Filecoin network.
+
+## Quickstart
+
+The [FEVM Hardhat kit](https://github.com/filecoin-project/FEVM-Hardhat-Kit) is a starter hardhat project for developing, deploying, and testing Solidity smart contracts on the Filecoin network. It functions in the same way as other Hardhat development kits. Check out the quickstart below to test it out!
+
+### Prerequisites
+
+This guide assumes you have the following installed:
+
+* [Yarn](https://yarnpkg.com/)
+* A Filecoin address stored in [MetaMask](../../basics/assets/metamask-setup.md)
+
+### Environment setup
+
+First, we need to grab the starter kit and install the dependencies.
+
+1. Clone the Hardhat starter kit and move into the new `fevm-hardhat-kit` directory:
+
+```
+git clone https://github.com/filecoin-project/fevm-hardhat-kit.git
+cd fevm-hardhat-kit
+
+# Cloning into 'fevm-hardhat-kit'...
+# remote: Enumerating objects: 758, done.
+# remote: Counting objects: 100% (725/725), done.
+#
+# ...
+```
+
+2. Use Yarn to install the project’s dependencies:
+
+```
+yarn install
+
+# [1/4] 🔍 Resolving packages...
+# [2/4] 🚚 Fetching packages...
+# [3/4] 🔗 Linking dependencies...
+#
+# ...
+#
+# ✨ Done in 16.34s.
+```
+
+3. Create an environment variable for your private key. Each wallet has a different process for exporting your private key - check your wallet’s official documentation.
+
+```sh
+export PRIVATE_KEY=''
+
+# For example
+# export PRIVATE_KEY='d52cd65a5746ae71cf3d07a8cf392ca29d7acb96deba7d94b19a9cf3c9f63022'
+```
+
+4. Always be careful when dealing with your private key. Double-check that you’re not hardcoding it anywhere or committing it to source control like GitHub. Anyone with access to your private key has complete control over your funds.
+5. Get the addresses associated with the private key from Hardhat:
+
+```sh
+yarn hardhat get-address
+
+# Ethereum address (this addresss should work for most tools): 0x11Fc070e5c0D32024c9B63c136913405e07C8c48
+# f4address (also known as t4 address on testnets): f410fch6aods4buzaete3mpatnejuaxqhzdci3j67vyi
+# ✨ Done in 1.40s.
+```
+
+Now that we’ve got the kit set up, we can start using it to develop and deploy our contracts.
+
+### Manage the contracts
+
+There are two main types of contracts:
+
+* Basic Solidity examples: Simple contracts to show off basic Solidity.
+* Filecoin API Examples: Contracts that demo how to use the Filecoin APIs in Solidity to access storage deals and other Filecoin-specific functions.
+
+Make sure that your account has funds. You won’t be able to deploy any contracts without `FIL` or `tFIL`.
+
+1. Run `hardhat deploy` to deploy all the contracts. This can take a few minutes:
+
+```sh
+yarn hardhat deploy
+
+# Compiled 18 Solidity files successfully
+# Wallet Ethereum Address: 0x11Fc070e5c0D32024c9B63c136913405e07C8c48
+# Deploying Simplecoin...
+#
+# ...
+#
+# ✨ Done in 211.76s.
+```
+
+2. Interact with the contracts using the available functions within the `tasks` folder. For example, you can get the balance of the `simple-coin` contract by calling the `get-balance` function:
+
+```sh
+yarn hardhat get-balance --contract '0xA855520fcCB6422976F7Ac78534edec2379Be5f6' --account '0x11Fc070e5c0D32024c9B63c136913405e07C8c48'
+
+# Reading SimpleCoin owned by 0x11Fc070e5c0D32024c9B63c136913405e07C8c48 on network calibration
+# Amount of Simplecoin owned by 0x11Fc070e5c0D32024c9B63c136913405e07C8c48 is 12000
+# Total amount of minted tokens is 12000
+# ✨ Done in 3.73s.
+```
+
+## Hardhat docs
+
+You can view the official Hardhat documentation over at [`hardhat.org/docs`](https://hardhat.org/docs).
diff --git a/smart-contracts/developing-contracts/remix.md b/smart-contracts/developing-contracts/remix.md
new file mode 100644
index 000000000..336e4e949
--- /dev/null
+++ b/smart-contracts/developing-contracts/remix.md
@@ -0,0 +1,150 @@
+---
+description: >-
+ The Filecoin EVM runtime allows developers to use Ethereum tooling, like
+ Remix, with the Filecoin network.
+---
+
+# Remix
+
+## Launch an ERC-20 token
+
+As a simple introduction, we’re going to use Remix to create an ERC-20 token on the Filecoin network. In this guide, we’re using the Calibration testnet, but this process is the same for mainnet.
+
+This guide assumes you’ve already connected your [MetaMask extension to a Filecoin network](../../basics/assets/metamask-setup.md).
+
+### Create a workspace
+
+In Remix, workspaces are where you can create a contract, or group of contracts, for each project. Let’s create a new workspace to create our new ERC-20 token.
+
+1. Open [remix.ethereum.org](https://remix.ethereum.org).
+2. Click the `+` icon next to **Workspaces** to create a new workspace:
+
+ 
+3. In the **Choose a template** dropdown, select **ERC 20**.
+4. Select the **Mintable** checkbox.
+5. Enter a fun name for your token in the **Workspace name** field. Something like `CorgiCoin` works fine.
+6. Click **OK** to create your new workspace.
+
+ 
+
+### Customize the contract
+
+The contract template we’re using is pretty simple. We just need to modify a couple of variables.
+
+1. Under the **contract** directory, click **MyToken.sol**.
+
+ 
+2. In the editor panel, replace `MyToken` with whatever you’d like to name your token. In this example, we’ll use `CorgiCoin`.
+
+ 
+3. On the same line, replace the second string with whatever you want the symbol of your token to be. In this example, we’ll use `CRG`.
+
+ 
+
+That’s all we need to change within this contract. You can see on line 4 that this contract is importing another contract from `@openzeppelin` for us, meaning that we can keep our custom token contract simple.
+
+### Compile
+
+1. Click the green play symbol at the top of the workspace to compile your contract. You can also press `CMD` + `s` on MacOS or `CTRL` + `s` on Linux and Windows.
+
+ 
+2. Remix automatically fetches the two `import` contracts from the top of our `.sol` contract. You can see these imported contracts under the `.deps` directory. You can browse the contracts there, but Remix will not save any changes you make.
+
+ 
+
+### Deploy
+
+Now that we’ve successfully compiled our contract, we need to deploy it somewhere! This is where our previous MetaMask setup comes into play.
+
+1. Click the **Deploy** tab from the left.
+
+ 
+2. Under the **Environment** dropdown, select **Injected Provider - MetaMask**.
+
+ 
+3. MetaMask will open a new window confirming that you want to connect your account to Remix.
+4. Click **Next**:
+
+ 
+5. Click **Connect** to connect your `tFIL` account to Remix.
+
+ 
+6. Back in Remix, under the **Account** field, you’ll see that it says something like `0x11F... (5 ether)`. This value is 5 `tFIL`, but Remix doesn’t support the Filecoin network, so it doesn’t understand what `tFIL` is. This isn’t a problem; it’s just a little quirk of using Remix.
+
+ 
+7. Under the **Contract** dropdown, ensure the contract you created is selected.
+
+ 
+8. Click **Deploy**.
+
+ 
+9. MetaMask will open a window and ask you to confirm the transaction. Scroll down and click **Confirm** to have MetaMask deploy the contract. If you’re deploying to mainnet, we advise you to [adjust your gas fees](remix.md#adjusting-your-gas-fees) for a cheaper deployment.
+
+ 
+10. Back in Remix, a message at the bottom of the screen shows that the creation of your token is pending.
+
+ 
+11. Wait around 90 seconds for the deployment to complete.
+
+ 
+
+On the Filecoin network, a new set of blocks, also called a tipset, is created every thirty seconds. When deploying a contract, the transaction needs to be received by the network, and then the network needs to confirm the contract. This process takes around one to two tipsets to process – or around 60 to 90 seconds.
+
+## Use your contract
+
+Now that we’ve compiled and deployed the contract, it’s time to actually interact with it!
+
+### Mint your tokens
+
+Let’s call a method within the deployed contract to mint some tokens.
+
+1. Back in Remix, open the **Deployed Contracts** dropdown, within the **Deploy** sidebar tab.
+
+ 
+2. Expand the `mint` method. You must fill in two fields here: `to` and `amount`.
+
+ 
+3. The `to` field specifies which address you want these initial tokens sent to. Open MetaMask, copy your address, and paste it into this field.
+
+ 
+4. The `amount` field expects an `attoFil` value. 1 `FIL` is equal to 1,000,000,000,000,000,000 `attoFil`. So if you wanted to mint 100 `FIL`, you would enter `100` followed by 18 zeros: `100000000000000000000`.
+5. Click **Transact**.
+
+ 
+6. MetaMask will open a window and ask you to confirm the transaction:
+
+ 
+
+Again, you must wait for the network to process the transaction, which should take about 90 seconds. You can move on to the next section while you’re waiting.
+
+### Add to MetaMask
+
+Currently, MetaMask has no idea what our token is or what it even does. We can fix this by explicitly telling MetaMask the address of our contract.
+
+1. Go back to Remix and open the **Deploy** sidebar tab.
+2. Under **Deployed Contracts**, you should see your contract address at the top. Click the copy icon to copy the address to your clipboard:
+
+ 
+3. Open MetaMask, select **Assets**, and click **Import your tokens**:
+
+ 
+4. In the **Token contract address** field, paste the contract address you just copied from Remix and then click **Add custom token**. MetaMask should autofill the rest of the information based on what it can find from the Filecoin network.
+
+ 
+5. Click **Import token**:
+6. You should now be able to see that you have 100 of your tokens within your MetaMask wallet!
+
+ 
+
+And that’s it! Deploying an ERC-20 token on Filecoin is simple!
+
+### Adjusting your gas fees
+
+Remix uses a default of 2.5 nanoFIL per gas as a priority fee, which is usually too high for the Filecoin network. If you don’t adjust this, you may end up overpaying when deploying to mainnet. We recommend that you switch from the site-suggested gas fees to oracle-supplied gas fees when deploying your contract.
+
+1. When the deployment transaction confirmation pop-up window shows up, click on **Site suggested**.
+
+ 
+2. Switch to **Market**, **Aggressive**, or **Low**. The **Market** option is generally suitable for most situations.
+
+ 
diff --git a/smart-contracts/developing-contracts/solidity-libraries.md b/smart-contracts/developing-contracts/solidity-libraries.md
new file mode 100644
index 000000000..88b51192e
--- /dev/null
+++ b/smart-contracts/developing-contracts/solidity-libraries.md
@@ -0,0 +1,125 @@
+---
+description: >-
+ With Filecoin Virtual Machine (FVM), Solidity developers can use existing
+ libraries listed on this page in their FVM smart contracts.
+---
+
+# Solidity libraries
+
+## OpenZeppelin
+
+[OpenZeppelin](https://www.openzeppelin.com/contracts) provides a library of battle-tested smart contract templates, including widely used implementations of ERC token standards. For a guided example that implements an ERC20 token on the Filecoin network, see [Example using an ERC20 contract](../fundamentals/erc-20-quickstart.md).
+
+### Benefits
+
+OpenZeppelin offers the following to smart contract developers:
+
+* Implementations of standards like ERC20, ERC721, and ERC1155.
+* Flexible access control schemes like `Ownable`, `AccessControl`, and `onlyRole`.
+* Useful and secure utilities for signature verification, `SafeMath`, etc.
+
+Token standards, such as [ERC20](https://docs.openzeppelin.com/contracts/4.x/erc20), are the most widely used smart contract libraries from OpenZeppelin. These contracts, listed below, implement both _fungible_ and _non-fungible_ tokens:
+
+* [ERC20](https://docs.openzeppelin.com/contracts/4.x/erc20) is the simplest and most widespread token standard for fungible assets.
+* [ERC721](https://docs.openzeppelin.com/contracts/4.x/erc721) is the standard solution for non-fungible tokens and is often used for collectibles and games.
+* [ERC777](https://docs.openzeppelin.com/contracts/4.x/erc777) provides a richer standard for fungible tokens, supporting new use cases and backwards compatibility with ERC20.
+* [ERC1155](https://docs.openzeppelin.com/contracts/4.x/erc1155) is a new standard for _multi-tokens_, where a single contract represents multiple fungible and non-fungible tokens, and operations are batched for increased gas efficiency.
+
+### Using OpenZeppelin with FVM
+
+The _general_ procedure for using OpenZeppelin with FVM is as follows:
+
+1. Install OpenZeppelin. For example, using `npm`:
+
+```
+npm install @openzeppelin/contracts
+```
+
+2. Import the specific library you want to use.
+3. In your smart contract, inherit the library.
+
+Thanks to the FVM, your contract can be integrated and deployed on the Filecoin network with OpenZeppelin inheritance. For a guided example that implements an ERC20 token on the Filecoin network, see [Example using an ERC20 contract](../fundamentals/erc-20-quickstart.md).
+
+### Example using an ERC-20 contract
+
+In the following tutorial, you’ll write and deploy a smart contract that implements the [ERC-20](https://docs.openzeppelin.com/contracts/4.x/erc20) on the Calibration testnet using Remix and MetaMask:
+
+**Prerequisites**
+
+Let’s take an ERC20 contract as an example to write and deploy it on the Calibration testnet using Remix & MetaMask:
+
+* Remix.
+* MetaMask.
+* [MetaMask connected to the Calibration testnet](../../networks/calibration/).
+* Test tokens (tFIL) [from the faucet](https://faucet.calibration.fildev.network/funds.html).
+
+**Procedure**
+
+In this procedure, you will create, deploy, mint and send an [ERC20](https://docs.openzeppelin.com/contracts/4.x/erc20) token on Calibration using Remix and MetaMask.
+
+1. Navigate to [remix.ethereum.org](https://remix.ethereum.org/).
+2. Next to **Workspaces**, click the **+** icon to create a new workspace.
+3. In the **Choose a template** dropdown, select **ERC 20** along with the **Mintable** checkbox.
+
+ 
+4. Click **OK**.
+5. In the **contract** directory, open **MyToken.sol**.
+6. Set the token `name` and `symbol`:
+
+```solidity
+// contracts/GLDToken.sol
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+
+contract MyToken is ERC20 {
+ constructor(uint256 initialSupply) ERC20("MyToken", "MTK") {
+ _mint(msg.sender, initialSupply);
+ }
+}
+```
+
+7. Next, compile and deploy the contract on Filecoin.
+8. At the top of the workspace, click the green play symbol to compile the contract.
+
+ 
+9. Once the contract compiles, open the **Deploy** tab on the left.
+10. Under the **Environment** dropdown, select **Injected Provider - MetaMask**.
+11. In the MetaMask popup window, select **Confirm** to approve the connection.
+
+ 
+12. Click **Deploy**, and confirm the transaction on MetaMask. Your token contract will be deployed to the Calibration testnet once the network confirms the transaction.
+13. In Remix, open the **Deployed Contracts** dropdown.
+14. In the `mint` method, set:
+
+    * `to` to your wallet address.
+    * `amount` to `100000000000000000000` (100 `FIL`).
+
+ 
+15. Click **Transact**.
+16. In MetaMask, confirm the transaction.
+
+Once the network processes the transaction, the token is minted and sent to your network address. Congratulations, you’ve completed the tutorial!
+
+### Additional resources
+
+Learn more about OpenZeppelin with the following resources:
+
+* [OpenZeppelin Contracts website](https://www.openzeppelin.com/contracts)
+* [Documentation](https://docs.openzeppelin.com/contracts/4.x/)
+* [GitHub](https://github.com/OpenZeppelin/openzeppelin-contracts)
+
+## DappSys
+
+The DappSys library provides safe, simple, and flexible Ethereum contract building blocks for common Ethereum and Solidity use cases.
+
+* [Documentation](https://dappsys.readthedocs.io/en/latest/)
+* [GitHub](https://github.com/dapphub/dappsys)
+
+## 0x protocol
+
+The 0x protocol library provides a set of secure smart contracts that facilitate peer-to-peer exchange of Ethereum-based assets.
+
+* [Documentation](https://docs.0x.org/introduction/introduction-to-0x)
+* [GitHub](https://github.com/0xProject)
diff --git a/smart-contracts/developing-contracts/verify-a-contract.md b/smart-contracts/developing-contracts/verify-a-contract.md
new file mode 100644
index 000000000..d25f9d483
--- /dev/null
+++ b/smart-contracts/developing-contracts/verify-a-contract.md
@@ -0,0 +1,57 @@
+---
+description: >-
+ This page lists various Filecoin Ethereum Virtual Machine (FEVM) explorers
+ with verification tools, and provides a tutorial on how to verify a contract
+ using Filfox.
+---
+
+# Verify a contract
+
+## Verification tools
+
+The following FEVM-compatible chain explorers offer contract verification tools. For more information, click the appropriate link below.
+
+* [Filfox](https://filfox.info/en/contract)
+* [Starboard](https://fvm.starboard.ventures/explorer/verifier)
+* [Beryx](https://beryx.zondax.ch/contract\_verifier)
+
+## Verification tutorial with Filfox
+
+The following guide walks you through the process of contract verification using the [Filfox](https://filfox.info/en/contract) explorer.
+
+### Prerequisites
+
+* A smart contract (`.sol` file)
+* [Remix](https://remix.ethereum.org/)
+
+### Procedure
+
+1. Open Remix.
+2. In the **File Explorer** sidebar tab, under **contracts**, right click on the contract you want to verify.
+3. From the menu, select **Flatten** to flatten the `.sol` file and ensure that all components and tasks are included.
+
+ A new contract with the name `_flattened.sol` is generated below your original contract.
+4. Ensure that the license and Solidity version in the flattened contract is the same as in your original contract.
+5. Click **Save**.
+6. Right click on `_flattened.sol`.
+7. In the menu, click **Download** to save the file.
+8. Note the following information, as you will need it later:
+ * The address of your deployed contract
+ * The contract license type (if any)
+ * The Solidity compiler version
+9. Navigate to [Filfox](https://filfox.info/en/contract).
+10. In the form, enter the information noted previously for the deployed contract you would like to verify:
+ * The address
+ * The license type
+ * The compiler version
+11. Click **Continue**.
+12. Click **Select .sol files**.
+13. Select your flattened `.sol` file.
+14. Click **Verify and Publish**.
+
+ Success! Your contract is now verified.
+15. To view your verified contract:
+ 1. Enter the address of the contract in the [Filfox search bar](https://filfox.info/).
+ 2. Scroll down the contract page and select the **Contract** tab.
+
+ A **Contract Source Code Verified** banner is displayed, along with contract information and source code. You can also [view other verified contracts on Filfox](https://filfox.info/en/stats/verified-contracts).
diff --git a/smart-contracts/filecoin-evm-runtime/README.md b/smart-contracts/filecoin-evm-runtime/README.md
new file mode 100644
index 000000000..af3876b1c
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section explains what the Filecoin EVM-runtime (FEVM) is, and how
+ developers can use it to interact with the Filecoin network.
+---
+
+# Filecoin EVM-runtime
+
diff --git a/smart-contracts/filecoin-evm-runtime/actor-types.md b/smart-contracts/filecoin-evm-runtime/actor-types.md
new file mode 100644
index 000000000..64174bb29
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/actor-types.md
@@ -0,0 +1,54 @@
+---
+description: >-
+ In the Filecoin network, an address is a unique identifier that refers to an
+ actor in the Filecoin state. All actors in Filecoin have a corresponding
+ address which varies from the different usages.
+---
+
+# Actor types
+
+The Filecoin EVM runtime introduces three new actor types:
+
+1. [Placeholder actors](actor-types.md#placeholder).
+2. [Ethereum-style accounts](actor-types.md#ethereum-style-account), also called `EthAccount`.
+3. [EVM smart contracts](actor-types.md#evm-smart-contract).
+
+## Placeholder
+
+A _placeholder_ is a particular type of pseudo-actor that holds funds until an actual actor is deployed at a specific address. When funds are sent to an address starting with `f410f` that doesn’t belong to any existing actor, a _placeholder_ is created to hold the said funds until either an account or smart contract is deployed to that address.
+
+A placeholder can become a _real_ actor in one of two ways:
+
+1. A message is sent from the account that would exist at that placeholder’s address. If this happens, the placeholder is automatically upgraded into an account.
+2. An EVM smart contract is deployed to the address.
+
+## Ethereum-style account
+
+An Ethereum-style account is the Filecoin EVM runtime equivalent of an account with an `f1` or `f3` address, also known as native accounts. However, there are a few key differences:
+
+1. These accounts have `0x`-style addresses and an equivalent `f`-style address starting with `f410f`.
+2. Messages from these accounts can be sent with Ethereum wallets like MetaMask by connecting the wallet to a Filecoin client.
+3. These accounts can be used to transfer funds to native or Ethereum-style accounts.
+4. They can be used to call EVM smart contracts and can be used to deploy EVM smart contracts. However, they cannot be used to call native actors such as multisig or miner actors.
+
+## EVM smart contract
+
+An EVM smart contract actor hosts a single EVM smart contract. Every EVM smart contract will have a `0x`-style address.
+
+### Deploying
+
+An EVM smart contract can be deployed in one of three ways:
+
+1. An existing EVM smart contract can use the EVM’s `CREATE`/`CREATE2` opcode.
+2. Ethereum-native tooling, such as [Remix](../developing-contracts/remix.md) or [Hardhat](../developing-contracts/hardhat.md), can be used in conjunction with an Ethereum-style account.
+3. A native account can call method `4` on the Ethereum account manager `f010`, passing the EVM init code as a CBOR-encoded byte-string (major type 2) in the message parameters.
+
+### Calling
+
+An EVM smart contract may be called in one of three ways:
+
+1. An EVM smart contract can use the EVM’s `CALL` opcode.
+2. Ethereum-native tooling, like [MetaMask](../../basics/assets/metamask-setup.md), can be used in conjunction with an Ethereum-style account.
+3. Finally, a native account can call method `3844450837` (`FRC42(InvokeEVM)`):
+ 1. The input data should either be empty or encoded as a CBOR byte string.
+ 2. The return data will either be empty or encoded as a CBOR byte string.
diff --git a/smart-contracts/filecoin-evm-runtime/address-types.md b/smart-contracts/filecoin-evm-runtime/address-types.md
new file mode 100644
index 000000000..d9d02d548
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/address-types.md
@@ -0,0 +1,159 @@
+---
+description: >-
+ In the Filecoin network, an address is a unique identifier that refers to an
+ actor in the Filecoin state. All actors in Filecoin have a corresponding
+ address which varies from the different usages.
+---
+
+# Address types
+
+Filecoin has five address classes, and actors tend to have _multiple_ addresses. Furthermore, each address class has its own rules for converting between binary and text.
+
+The goal of using different types of addresses is to provide a robust address format that is scalable, easy to use, and reliable. These addresses encode information including:
+
+* Network prefix: indicates the network the actor belongs to.
+* Protocol indicator: identify the type and version of this address.
+* Payload: identify the actor according to the protocol.
+* Checksum: validate the address.
+
+Filecoin addresses can be represented either as raw bytes or a string. Raw bytes format will always be used on-chain. An address can also be encoded to a string, including a checksum and network prefix. The string format will never appear on-chain and is only for human-readable purposes.
+
+Filecoin address can be broken down like this:
+
+| Network prefix | Protocol indicator | Payload | Checksum |
+| -------------- | ----------------------------------- | --------- | -------- |
+| `f` / `t` | 1 byte: `0` / `1` / `2` / `3` / `4` | _n_ bytes | 4 bytes |
+
+The network prefix is prepended to an address when encoding to a string. The network prefix indicates which network an address belongs to. Network prefixes never appear on-chain and are only used when encoding an address to a human-readable format.
+
+* `f` - addresses on the Filecoin mainnet.
+* `t` - addresses used on any Filecoin testnet.
+
+The protocol indicator identifies the address type, which describes how a method should interpret the information in the `payload` field of an address.
+
+* `0`: An ID address.
+* `1`: A wallet address generated from a secp256k1 public key.
+* `2`: An actor address.
+* `3`: A wallet address generated from BLS public key.
+* `4`: A delegated address for user-defined foreign actors:
+ * `410`: Ethereum-compatible address space managed by the Ethereum address manager (EAM). Each 410 address is equivalent to an 0x address.
+
+Each address type is described below.
+
+## ID addresses
+
+All actors have a short integer ID assigned to them sequentially by the `InitActor`, a unique actor that can create _new_ actors. The integer that gets assigned is the ID of that actor. An _ID address_ is an actor’s ID prefixed with the network identifier and the protocol indicator. Therefore, any address in the Filecoin network has a unique ID address assigned to it.
+
+The mainnet burn account ID address is `f099` and is structured as follows:
+
+```plaintext
+ Protocol Indicator
+ |
+f 0 9 9
+| |
+| Actor ID
+|
+Network identifier
+```
+
+## Actor addresses
+
+Addresses representing an actor deployed through the init actor in the Filecoin network. It provides a way to create robust addresses for actors not associated with a public key. They are generated by taking a `sha256` hash of the output of the account creation.
+
+Actor addresses are often referred to by their shorthand, `2`.
+
+## Wallet addresses
+
+Addresses managed directly by users, like accounts, are derived from a public-private key pair. If you have access to a private key, you can sign messages sent from that wallet address. The public key is used to derive an address for the actor. Public key addresses are referred to as _robust addresses_ as they do not depend on the Filecoin chain state.
+
+Public key addresses allow devices, like hardware wallets, to derive a valid Filecoin address for your account using just the public key. The device doesn’t need to ask a remote node what your ID address is. Public key addresses provide a concise, safe, human-readable way to reference actors before the chain state is final. ID addresses are a space-efficient way to identify actors in the Filecoin chain state, where every byte matters.
+
+Filecoin supports two types of public key addresses:
+
+* [secp256k1 addresses](https://en.bitcoin.it/wiki/Secp256k1) that begin with the protocol indicator as `1`.
+* [BLS addresses](https://en.wikipedia.org/wiki/BLS\_digital\_signature) that begin with the protocol indicator as `3`.
+
+`t1iandfn6d...ddboqxbhoeva` - a testnet wallet address generated using secp256k1. `t3vxj34sbdr3...road7cbygq` - a testnet wallet address generated using BLS.
+
+## Delegated addresses
+
+Filecoin supports extensible, user-defined actor addresses through the `4` address class, introduced in [Filecoin Improvement Proposal (FIP) 0048](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0048.md). The `4` address class provides the following benefits to the network:
+
+* Implement foreign addressing systems in Filecoin.
+* A predictable addressing scheme to support interactions with addresses that do not yet exist on-chain.
+* User-defined, programmable addressing systems without extensive changes and network upgrades.
+
+For example, a testnet delegated address using the Ethereum Addressing System is structured as follows:
+
+```plaintext
+ Address manager actor ID
+ |
+t 410 iandfn6d...
+| |
+| New actor ID
+|
+Network identifier
+```
+
+The _address manager actor ID_ is the actor ID of the address manager actor, which creates new actors and assigns a `4` address to the new actor. This leverages the extensible feature of the `f4` address class.
+
+The _new actor ID_ is the arbitrary actor ID chosen by that actor.
+
+### Restrictions
+
+Currently, per [FIP 0048](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0048.md), `f4` addresses may only be assigned by and in association with specific, built-in actors called _address managers_. This restriction will likely be relaxed once users are able to deploy custom WebAssembly actors.
+
+This address type plays an essential role in supporting the FEVM. It allows the Filecoin network to be able to recognize the foreign address and validate and execute the transactions sent and signed by the supported foreign addresses.
+
+The supported foreign addresses can be cast as `f4/t4` addresses and vice versa, but not as `f1/t1` or `f3/t3` addresses.
+
+### Ethereum Address Manager
+
+Ethereum Address Manager (EAM) is a built-in actor that manages the Ethereum address space, anchored at the `410` address namespace. It acts like an EVM smart contract factory, offering methods to create and assign the `f410/t410` Filecoin address to Ethereum address.
+
+The subaddress of an `f410/t410` address is the original Ethereum address. Ethereum addresses can be cast as `f410` addresses, and vice-versa. The `f410/t410` address will be used for the Ethereum-compatible FVM (FEVM) development tools and applications built on FEVM.
+
+**Example**
+
+```plaintext
+# An Ethereum wallet address.
+0xd388ab098ed3e84c0d808776440b48f685198498
+
+# The corresponding Filecoin address on Calibration.
+t410f2oekwcmo2pueydmaq53eic2i62crtbeyuzx2gmy
+```
+
+If you have an Ethereum wallet address starting with `0x`, then the Ethereum Address Manager (EAM) will assign a corresponding `t410` Filecoin address to it. If you send 10 TFIL to `0xd388ab098ed3e84c0d808776440b48f685198498` using a wallet like MetaMask, you will receive 10 TFIL to your `t410f2oekwcmo2pueydmaq53eic2i62crtbeyuzx2gmy` address on Filecoin Calibration testnet.
+
+```plaintext
+# A Filecoin smart contract address.
+t410fl5qeigmkcytz7b6sqoojtcetqwf37dm4zv4aijq
+
+# The corresponding Ethereum smart contract address.
+0x5f6044198a16279f87d2839c998893858bbf8d9c
+```
+
+Again, assume you have deployed a Solidity smart contract on Filecoin Calibration. Then you will receive a smart contract address starting with `t410`. EAM will also assign a corresponding `0x` Ethereum address to it.
+
+When you try to invoke this smart contract on Filecoin using Ethereum tooling, you need to use your `0x5f6044198a16279f87d2839c998893858bbf8d9c` smart contract address.
+
+### Converting to a 0x-style address
+
+The Filecoin EVM runtime introduces support for `0x` Ethereum-style addresses. Filecoin addresses starting with either `f0` or `f410f` can be converted to the `0x` format as follows:
+
+Addresses starting with `f0` can be converted to the `0x` format by:
+
+* Extracting the `actor_id` (e.g., the `1234` in `f01234`).
+* Hex encode with a `0xff` prefix: `sprintf("0xff0000000000000000000000%016x", actor_id)`.
+
+Addresses starting with `f410f` can be converted to the `0x` format by:
+
+* Removing the `f410f` prefix.
+* Decoding the remainder as base 32 (RFC 4648 without padding).
+* Trim off the last 4 bytes. This is a _checksum_ that can optionally be verified, but that’s beyond the scope of this documentation.
+* Assert that the remaining address is 20 bytes long.
+* Hex-encode: `sprintf("0x%040x", actor_id)`.
+
+{% hint style="danger" %}
+`f0` addresses are **not** re-org stable and should not be used until the chain has settled.
+{% endhint %}
diff --git a/smart-contracts/filecoin-evm-runtime/difference-with-ethereum.md b/smart-contracts/filecoin-evm-runtime/difference-with-ethereum.md
new file mode 100644
index 000000000..49b05bffe
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/difference-with-ethereum.md
@@ -0,0 +1,62 @@
+---
+description: >-
+ While Filecoin EVM runtime aims to be compatible with the Ethereum ecosystem,
+ it has some marked differences.
+---
+
+# Difference with Ethereum
+
+## Gas costs
+
+Filecoin charges Filecoin gas only. This includes the Filecoin EVM runtime. Instead of the Filecoin EVM runtime charging gas according to the EVM spec for each EVM opcode executed, the Filecoin virtual machine (FVM) charges Filecoin gas for executing the EVM interpreter itself. The [How gas works](how-gas-works.md) page goes into this in more detail. Importantly, this means that Filecoin EVM runtime gas costs and EVM gas costs will be very different:
+
+1. EVM and Filecoin gas are different units of measurement and are not 1:1. Purely based on chain throughput (gas/second), the ratio of Ethereum gas to Filecoin gas is about 1:444. Expect Filecoin gas numbers to look _much_ larger than those in Ethereum.
+2. Because Filecoin charges Filecoin gas for executing the Filecoin EVM runtime interpreter:
+ 1. Some instructions may be more expensive and/or cheaper in Filecoin EVM runtime than they are in the EVM.
+ 2. EVM instruction costs can depend on the exact Filecoin EVM runtime code-paths taken, and caching.
+
+{% hint style="danger" %}
+⚠️ Filecoin gas costs are not set in stone and should never be hard-coded. Future network upgrades will break any smart contracts that depend on gas costs not changing.
+{% endhint %}
+
+## Gas stipend
+
+Solidity calls `address.transfer` and `address.send` to grant a fixed gas stipend of 2300 Ethereum gas to the called contract. The Filecoin EVM runtime automatically detects such calls, and sets the gas limit to 10 million Filecoin gas. This is a relatively more generous limit than Ethereum’s, but it’s future-proof. You should expect the address called to be able to carry out more work than in Ethereum.
+
+## Self destruct
+
+Filecoin EVM runtime emulates EVM self-destruct behavior but isn’t able to entirely duplicate it:
+
+1. There is no gas refund for self-destruct.
+2. On self-destruct, the contract is marked as self-destructed, but is not actually deleted from the Filecoin state-tree. Instead, it simply behaves as if it does not exist. It acts like an empty contract.
+3. Unlike in the EVM, in Filecoin EVM runtime, self-destruct can _fail_ causing the executing contract to revert. Specifically, this can happen if the specified beneficiary address is an embedded [ID address](../../basics/the-blockchain/addresses.md) and no actor exists with the specified ID.
+4. If funds are sent to a self-destructed contract after it self-destructs but before the end of the transaction, those funds remain with the self-destructed contract. In Ethereum, these funds would vanish after the transaction finishes executing.
+
+## CALLCODE
+
+The `CALLCODE` opcode has not been implemented. Use the newer `DELEGATECALL` opcode.
+
+## Bare-value sends
+
+In Ethereum, `SELFDESTRUCT` is the only way to send funds to a smart contract without giving the target smart contract a chance to execute code.
+
+In Filecoin, any actor can use `method 0`, also called a bare-value send, to transfer funds to any other actor without invoking the target actor’s code. You can think of this behavior as having the suggested [`PAY` opcode](https://eips.ethereum.org/EIPS/eip-5920) already implemented in Filecoin.
+
+## Precompiles
+
+The Filecoin EVM runtime, unlike Ethereum, does not usually enforce gas limits when calling precompiles. This means that it isn’t possible to prevent a precompile from consuming all remaining gas. The `call actor` and `call actor id` precompiles are the exception. However, they apply the passed gas limit to the actor call, not the entire precompile operation (i.e., the full precompile execution end-to-end can use more gas than specified, it’s only the final `send` to the target actor that will be limited).
+
+## Multiple Addresses
+
+In Filecoin, contracts generally have multiple addresses. Two of these address types, `f0` and `f410f`, can be converted to 0x-style (Ethereum) addresses which can be used in the `CALL` opcode. See [Converting to a 0x-style address](address-types.md#converting-to-a-0x-style-address) for details on how these addresses are derived.
+
+Importantly, this means that any contract can be called by either its “normal” EVM address (corresponding to the contract’s `f410f` address) or its “masked ID address” (corresponding from the contract’s `f0` address).
+
+However, the addresses returned by the CALLER, ORIGIN, and ADDRESS instructions will always be the same for the same contract.
+
+* The ADDRESS will always be derived from the executing contract’s `f410f` address, even if the contract was called via a masked ID address.
+* The CALLER/ORIGIN will be derived from the caller/origin’s `f410f` address, if the caller/origin is an Ethereum-style account or an EVM smart contract. Otherwise, the caller/origin’s “masked ID address” (derived from their `f0` address) will be used.
+
+## Deferred execution model
+
+When calling an Ethereum method that allows the user to ask for the `latest` block, Filecoin will return the `chain head` - `1` block. This behavior was implemented for compatibility with the deferred execution mode that Filecoin uses. In this mode, messages submitted at a given `height` are only processed at `height` + `1`. This means that receipts for a block produced at `height` are only available at `height` + `1`.
diff --git a/smart-contracts/filecoin-evm-runtime/filforwarder.md b/smart-contracts/filecoin-evm-runtime/filforwarder.md
new file mode 100644
index 000000000..2f6520f6e
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/filforwarder.md
@@ -0,0 +1,146 @@
+---
+description: >-
+  The FilForwarder is a smart contract that lets users transfer FIL from an
+ Ethereum-based f4 address to a Filecoin address of a different type.
+---
+
+# FILForwarder
+
+## The problem
+
+Filecoin has multiple [address spaces](../../basics/the-blockchain/addresses.md): `f0`, `f1`, `f2`, `f3`, and `f4`. Each address space fits a particular need for the Filecoin network. The `f410` address spaces allow Ethereum addresses to be integrated into the Filecoin network.
+
+Users interacting with the Filecoin EVM runtime need to use `f4` addresses, masked to the Ethereum-style `0x` address. These addresses can be created from wallets like MetaMask, Coinbase wallet, or any other EVM-based wallet that allows for custom networks. There are use cases where a user with FIL in an `0x`-style address would want to send FIL to an `f1`, `f2`, or `f3` address. For example, taking FIL out of a smart contract and sending it to a multi-sig account or an exchange.
+
+This is where the problem lies. Ethereum-based wallets do not recognize the `f1`, `f2`, or `f3` address formats, making it impossible to send FIL from an Ethereum-style address.
+
+## The solution
+
+The FilForwarder exposes a smart contract method called `forward` that takes a byte-level definition of a protocol address in an _f-style_ and a message value. It then uses the internal Filecoin APIs exposed using the Filecoin EVM runtime to properly send FIL funds reliably and as cheaply as possible. This also has the side effect of creating the actor ID should the receiving address be considered new. In this way, using FilForwarder from an Ethereum wallet to any other Filecoin address space is safe and reliable.
+
+## Use FILForwarder
+
+You can use the FilForwarder contract in two ways:
+
+* Using the Glif.io browser wallet
+* Manually invoking the contract
+
+### Glif.io
+
+Before we start, make sure you know the address you’d like to forward your FIL to. You’ll need to ensure that the `f410` Ethereum-style address has enough FIL to cover the transaction costs.
+
+1. Go to [Glif.io](https://glif.io).
+2. Select the network you want to use from the dropdown and click **Connect Wallet**.
+
+ 
+
+   In this example, we’re using the (now deprecated) Hyperspace testnet.
+3. Confirm that you want to connect your wallet to Glif.io. You will only be prompted to do this once.
+
+ 
+4. Click **Close** on the connection confirmation screen.
+
+ 
+5. Select your wallet address from the dropdown and click **Forward FIL**.
+
+ 
+6. Enter the destination address for your FIL, along with the amount of FIL you want to send:
+
+ 
+7. Double-check that your destination address is correct and click **Send**.
+8. You can check the transaction by clicking the transaction ID.
+
+ 
+9. Your funds should be available at the destination after around two minutes. You can check that your funds have arrived by searching for the destination address in a block explorer.
+
+ 
+10. If you can’t see your funds, make sure you’re viewing the correct network.
+
+ 
+
+It generally takes around two minutes for a transaction to complete and for the funds to be available at the destination.
+
+### Manually
+
+The FilForwarder contract can be interacted with using standard Ethereum tooling like Hardhat or Remix. In this guide, we’re going to use Hardhat, but these steps can be easily replicated using the [web-based IDE Remix](../developing-contracts/remix.md).
+
+#### **Prerequisites**
+
+This guide assumes you have the following installed:
+
+* [Yarn](https://yarnpkg.com/)
+* A Filecoin address stored in [MetaMask](../../basics/assets/metamask-setup.md)
+
+#### **Environment setup**
+
+First, we need to grab the FilForwarder kit and install the dependencies:
+
+1. Clone the FilForwarder repository and install the dependencies:
+
+```
+git clone https://github.com/lotus-web3/FilForwarder
+cd FilForwarder
+```
+
+2. Use Yarn to install the project's dependencies:
+
+```
+yarn install
+[1/4] 🔍 Resolving packages...
+[2/4] 🚚 Fetching packages...
+[3/4] 🔗 Linking dependencies...
+
+...
+
+✨ Done in 16.34s.
+```
+
+3. Create an environment variable for your private key.
+
+```shell
+export PRIVATE_KEY=''
+
+# For example
+# export PRIVATE_KEY='d52cd65a5746ae71cf3d07a8cf392ca29d7acb96deba7d94b19a9cf3c9f63022'
+```
+
+Always be careful when dealing with your private key. Double-check that you’re not hardcoding it anywhere or committing it to source control like GitHub. Anyone with access to your private key has complete control over your funds.
+
+#### **Invoke the contract**
+
+The contract is deterministically deployed on all Filecoin networks at `0x2b3ef6906429b580b7b2080de5ca893bc282c225`. Any contract claiming to be a FilForwarder that does not reside at this address should not be trusted. Any dApp can connect to the wallet and use the ABI in this repository to call this method using any frontend. See the [Glif section](https://docs.filecoin.io/smart-contracts/filecoin-evm-runtime/filforwarder/#glifio) above for steps on using a GUI.
+
+Inside this repository is a Hardhat task called `forward`. This task will use the private key to send funds using the contract. This task uses the `fil-forwarder-{CHAIN_ID}.json` file to determine the deployed contract address for a given network. These addresses should always be the same, but these files prevent you from having to specify it each time.
+
+The `forward` command uses the following syntax:
+
+```shell
+yarn hardhat forward \
+ --network \
+ --destination \
+ --amount
+```
+
+* `NETWORK`: The network you want to use. The options are `mainnet` and `calibration`.
+* `DESTINATION_ADDRESS`: The address you want to send FIL to. This is a string, like `t01024` or `t3tejq3lb3szsq7spvttqohsfpsju2jof2dbive2qujgz2idqaj2etuolzgbmro3owsmpuebmoghwxgt6ricvq`.
+* `AMOUNT`: The amount of FIL you want to send. The value `3.141` would be 3.141 FIL.
+
+#### **Examples**
+
+1. To send 9 FIL to a `t3` address on the Calibration testnet, run:
+
+```sh
+yarn hardhat forward \
+ --network calibration \
+ --destination t3tejq3lb3szsq7spvttqohsfpsju2jof2dbive2qujgz2idqaj2etuolzgbmro3owsmpuebmoghwxgt6ricvq \
+ --amount 9.0
+```
+
+2. To send 42.5 FIL to a `t1` address on the Calibration testnet, run:
+
+```shell
+yarn hardhat forward \
+ --network calibration \
+ --destination t010135 \
+ --amount 42.5
+```
diff --git a/smart-contracts/filecoin-evm-runtime/how-gas-works.md b/smart-contracts/filecoin-evm-runtime/how-gas-works.md
new file mode 100644
index 000000000..28a1e2c5f
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/how-gas-works.md
@@ -0,0 +1,112 @@
+---
+description: >-
+ Instead of assigning a fixed gas cost in each instruction, the Filecoin EVM
+ runtime charges FIL gas based on the WASM code execution of the Filecoin EVM
+ runtime interpreter.
+---
+
+# How gas works
+
+When executing a message that invokes an EVM contract, the Filecoin virtual machine charges for the message chain inclusion (when the message originates off-chain) and then invokes the actor that hosts the contract. The actor is an instance of the EVM actor, which uses the Filecoin EVM runtime interpreter to execute the contract.
+
+The FEVM interpreter must first load its state, including the contract state, which costs additional gas. The interpreter then begins the execution of the contract bytecode. Each opcode interpreted may perform computation, syscalls, state i/o, and send new messages, all of which are charged with FIL gas. Finally, if the contract state is modified, the interpreter must flush it to the blockstore, which costs additional gas.
+
+Generally, it is not possible to compute gas costs for a contract invocation without using gas estimation through speculative execution.
+
+## Calculation example
+
+The total gas fee of a message is calculated as the following:
+
+```plaintext
+ (Gas usage × Base fee)
++ (GasLimit × GasPremium)
++ (OverEstimationBurn × BaseFee)
+```
+
+Take a look at the [Gas usage section of the How Filecoin works page](how-gas-works.md) for more information on the various gas-related parameters attached to each message.
+
+Let’s take a transaction as an example. Our gas parameters are:
+
+* `GasUsage` = `1000` attoFIL
+* `BaseFee` = `20` attoFIL
+* `Gas limit` = `2000` attoFIL
+* `Gas premium` = `5` attoFIL
+
+The total fee is `(GasUsage × BaseFee) + (GasLimit × GasPremium)`:
+
+```plaintext
+ 1000
+x 20
+= 20000
+
+ 2000
+x 5
+= 10000
+
+ 20000
++ 10000
+= 30000 attoFIL
+```
+
+Additionally, the message sender can also set the `GasFeeCap` parameter they are willing to pay. If the sender sets the `GasLimit` too high, the network will compute the amount of gas to be refunded and the amount of gas to be burned as `OverEstimationBurn`.
+
+## Estimate gas
+
+Filecoin nodes, such as Lotus, have several JSON-RPC API endpoints designed to help developers estimate gas usage. The available JSON-RPC APIs are:
+
+* `GasEstimateMessageGas`: estimate gas values for a message without any gas fields set, including GasLimit, GasPremium, and GasFeeCap. Returns a message object with those gas fields set.
+* `GasEstimateGasLimit` takes the input message and estimates the `GasLimit` based on the execution cost as well as a transaction multiplier.
+* `GasEstimateGasPremium`: estimates what `GasPremium` price you should set to ensure a message will be included in `N` epochs. The smaller `N` is the larger `GasPremium` is likely to be.
+* `GasEstimateFeeCap`: estimate the `GasFeeCap` according to `BaseFee` in the parent blocks.
+
+If you want to learn more about how to use those JSON-RPC APIs for the Filecoin gas model, please check the [JSON RPC API docs for Gas](../../reference/json-rpc/).
+
+Gas estimation varies from network to network. For example, the `BaseFee` on mainnet is different from the `BaseFee` on the Calibration testnet.
+
+If you’d rather not calculate and estimate gas for every message, you can just leave the optional fields unset. The gas fields will be estimated and set when the message is pushed to the mempool.
+
+## Ethereum compatibility
+
+Since Filecoin is fully EVM-compatible, Filecoin nodes also provide Ethereum-compatible APIs to support gas estimation:
+
+* [EthEstimateGas](../../reference/json-rpc/eth.md#ethestimategas): generates and returns an estimate of how much gas is necessary to allow the transaction to complete.
+* [EthMaxPriorityFeePerGas](../../reference/json-rpc/eth.md#ethmaxpriorityfeepergas): returns a fee per gas that is an estimate of how much you can pay as a priority fee, or “tip”, to get a transaction included in the current block.
+
+To request the current max priority fee in the network, you can send a request to a public Filecoin endpoint:
+
+```shell
+curl --location --request POST 'https://api.calibration.node.glif.io/rpc/v1' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "jsonrpc":"2.0",
+ "method":"eth_maxPriorityFeePerGas",
+ "params": null,
+ "id":1
+}' | jq
+```
+
+This will output something like:
+
+```plaintext
+{
+ "jsonrpc": "2.0",
+ "result": "0x31157",
+ "id": 1
+}
+```
+
+You can convert the `result` field from hexadecimal to base 10 in your terminal. Take the `result` output and remove the `0x` from the start. Then use `echo` to output the conversion:
+
+```shell
+echo $((16#31157))
+
+# 201047
+```
+
+## Additional Resources
+
+* Gas Filecoin improvement proposals (FIPs):
+ * [FIP 0032](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0032.md)
+ * [FIP 0037](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0037.md)
+ * [FIP 0054](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0054.md)
+* [Primitive Gas Price list](https://github.com/filecoin-project/ref-fvm/blob/master/fvm/src/gas/price\_list.rs)
diff --git a/smart-contracts/filecoin-evm-runtime/precompiles.md b/smart-contracts/filecoin-evm-runtime/precompiles.md
new file mode 100644
index 000000000..672e015fd
--- /dev/null
+++ b/smart-contracts/filecoin-evm-runtime/precompiles.md
@@ -0,0 +1,122 @@
+---
+description: >-
+ A precompile refers to a pre-existing piece of code or a smart contract that
+  is already deployed on the Filecoin network for use by developers.
+---
+
+# Precompiles
+
+The Filecoin virtual machine (FVM) has several pre-compiled contracts called precompiles. Each precompile address starts with `0xfe000...`. Specifically:
+
+* [Resolve address `0xfe00..01`](precompiles.md#resolve-address)
+* [Lookup delegated address `0xfe00..02`](precompiles.md#lookup-delegated-address)
+* [Call actor by address `0xfe00..03`](precompiles.md#call-actor-by-address)
+* [Call actor by ID `0xfe00..05`](precompiles.md#call-actor-by-id)
+
+## Resolve Address
+
+Address: `0xfe00000000000000000000000000000000000001`
+
+Resolves a Filecoin address (e.g., “f01”, “f2abcde”) into a Filecoin actor ID (`uint64`). Every actor in Filecoin has an actor ID.
+
+* Input: The Filecoin address in its _bytes_ representation.
+* Output:
+ * If the target actor exists, succeed and return an ABI-encoded actor ID (u64).
+ * If the target actor doesn’t exist, succeed with no return value.
+ * If the supplied address is invalid (cannot be parsed as a Filecoin address), revert.
+
+Example:
+
+```solidity
+(bool success, bytes memory actor_id_bytes) = address(0xfe00000000000000000000000000000000000001).staticcall(fil_address_bytes);
+require(success, "invalid address");
+require(actor_id_bytes.length == 32, "actor not found");
+uint64 actor_id = abi.decode(actor_id_bytes, (uint64));
+```
+
+## Lookup Delegated Address
+
+Address: `0xfe00000000000000000000000000000000000002`
+
+Looks up the “delegated address” (f4 address) of an actor by ID. This precompile is _usually_ used to lookup the Ethereum-style address of an actor by:
+
+1. Looking up the delegated address.
+2. Checking that the delegated address is 22 bytes long and starts with `0x040a`.
+3. Returning the last 20 bytes (which will be the Ethereum-style address of the target actor).
+
+* Input: An ABI-encoded actor ID (u64 encoded as a u256).
+* Output:
+ * If the supplied actor ID is larger than max u64, revert.
+ * If the target actor exists and has a delegated address, succeed and return the delegated address as raw bytes.
+ * Otherwise, succeed with no return value.
+
+Example:
+
+```solidity
+(bool success, bytes memory delegated_address_bytes) = address(0xfe00000000000000000000000000000000000002).staticcall(abi.encode(uint256(actor_id)));
+```
+
+## Call Actor By Address
+
+Address: `0xfe00000000000000000000000000000000000003`
+
+Calls the specified actor using the native FVM calling convention by its _Filecoin_ address. This precompile must be called with `DELEGATECALL` as the precompile will call the target actor _on behalf of_ the currently executing contract.
+
+### Input: ABI Encoded
+
+{% code overflow="wrap" %}
+```json
+(uint64 method, uint256 value, uint64 flags, uint64 codec, bytes params, bytes filAddress)
+```
+{% endcode %}
+
+* `method` is the Filecoin method number. The precompile will revert if the method number is not either 0 (bare value transfer) or at least 1024. Methods between 1 and 1023 inclusive are currently restricted (but may be allowed in the future).
+* `value` is the value to transfer in attoFIL.
+* `codec` is the IPLD codec of the parameters. This must either be 0x51 or 0x00 (for now) and will revert if passed an illegal codec:
+ * If the parameters are non-empty, they must be CBOR, and the codec must be 0x51.
+ * If the parameters are empty, the codec must be 0x00.
+* `params` are the CBOR-encoded message parameters, if any.
+* `filAddress` is the Filecoin address of the target actor being called.
+
+### Output: ABI Encoded
+
+```
+(int256 exit_code, uint64 return_codec, bytes return_value)
+```
+
+* `exit_code` is one of:
+ * `= 0` to indicate the call exited successfully.
+ * `> 0` to indicate that the target actor _reverted_ with the specified `exit_code`.
+ * `< 0` to indicate the call itself failed with the [syscall-error](https://docs.rs/fvm\_sdk/0.6.1/fvm\_sdk/sys/enum.ErrorNumber.html) `-exit_code`.
+* `return_codec` codec of returned data. This will be one of (for now):
+ * 0x51 or 0x71 - CBOR
+ * 0x55 - raw (the target actor returned raw data)
+ * 0x00 - nothing (the returned data will be empty as well).
+
+{% hint style="danger" %}
+This precompile only reverts if an input is statically invalid. If the precompile fails to call the target actor for any other reason, it will return a non-zero `exit_code` but will not revert.
+{% endhint %}
+
+Example:
+
+```solidity
+(bool success, bytes memory data) = address(0xfe00000000000000000000000000000000000003).delegatecall(abi.encode(method, value, flags, codec, params, filAddress));
+(int256 exit, uint64 return_codec, bytes memory return_value) = abi.decode(data, (int256, uint64, bytes));
+```
+
+## Call Actor By ID
+
+Address: `0xfe00000000000000000000000000000000000005`
+
+This precompile is identical to the “Call Actor By Address” (0xfe00..03) except that it accepts an actor ID (`uint64`) instead of an actor address as the last parameter. That is:
+
+```solidity
+(uint64 method, uint256 value, uint64 flags, uint64 codec, bytes params, uint64 actorId)
+```
+
+Example:
+
+```solidity
+(bool success, bytes memory data) = address(0xfe00000000000000000000000000000000000005).delegatecall(abi.encode(method, value, flags, codec, params, id));
+(int256 exit, uint64 return_codec, bytes memory return_value) = abi.decode(data, (int256, uint64, bytes));
+```
diff --git a/smart-contracts/fundamentals/README.md b/smart-contracts/fundamentals/README.md
new file mode 100644
index 000000000..80ef7aa14
--- /dev/null
+++ b/smart-contracts/fundamentals/README.md
@@ -0,0 +1,60 @@
+---
+description: >-
+ Learn about the various tools and options for adding Filecoin storage to
+ software applications, smart contracts, and workflows.
+---
+
+# Fundamentals
+
+## Develop on Filecoin
+
+Filecoin combines the benefits of content-addressed data leveraged by IPFS with blockchain-powered storage guarantees. The network offers robust and resilient distributed storage at massively lower cost compared to current centralized alternatives.
+
+Developers choose Filecoin because it:
+
+* is the world’s largest distributed storage network, without centralized servers or authority
+* offers on-chain proofs to verify and authenticate data
+* is highly compatible with [IPFS](https://ipfs.tech/) and content addressing
+* is the only decentralized storage network with petabyte-scale capacity
+* stores data at extremely low cost (and keeps it that way for the long term)
+
+## Filecoin and IPFS
+
+How do Filecoin and IPFS work together? They are complementary protocols for storing and sharing data in the distributed web. Both systems are open-source and share many building blocks, including content addressing (CIDs) and network protocols (libp2p).
+
+IPFS does not include built-in mechanisms to incentivize the storage of data for other people. To persist IPFS data, you must either run your own IPFS node or pay a provider.
+
+This is where Filecoin comes in. Filecoin adds an incentive layer to content-addressed data. Storage deals are recorded on-chain, and providers must submit proofs of storage to the network over time. Payments, penalties, and block rewards are all enforced by the decentralized protocol.
+
+Filecoin and IPFS are designed as separate layers to give developers more choice and modularity, but many tools are available for combining their benefits. This diagram illustrates how these tools (often called storage helpers) provide developer-friendly APIs for storing on IPFS, Filecoin, or both.
+
+
+
+## Filecoin and smart contracts
+
+You can improve speed and reduce gas fees by storing smart contract data on Filecoin. With Filecoin, the data itself is stored off-chain, but is used to generate verifiable CIDs and storage proofs that are recorded on the Filecoin chain and can be included in your smart contracts. This design pairs well with multiple smart contract networks such as Ethereum, Polygon, Avalanche, Solana, and more. Your smart contract only needs to include the compact content ids.
+
+## Get started
+
+Let’s get building. Choose one of the following APIs. These are all storage helpers, or tools and services that abstract Filecoin’s robust deal making processes into simple, streamlined API calls.
+
+* [Chainsafe Storage API](https://docs.storage.chainsafe.io/) - for projects needing S3 compatibility
+* [Estuary](https://estuary.tech/) - for larger-scale applications and public data
+* [NFT.storage](https://nft.storage/) - for NFT data
+* [Web3.storage](https://web3.storage/) - for general application data
+
+Examples:
+
+* [Polygon tutorial](https://nftschool.dev/tutorial/mint-nftstorage-polygon/) on NFTschool.dev
+* [Flow tutorial](https://nftschool.dev/tutorial/flow-nft-marketplace/) on NFTschool.dev
+* [Avalanche tutorial](https://nftschool.dev/tutorial/avax-nft/) on NFTschool.dev
+* [Using IPFS & Filecoin on Harmony](https://docs.harmony.one/home/developers/tutorials/ipfs-filecoin)
+
+## Additional resources
+
+* [Filecoin integrations for Web3 infrastructure](https://www.youtube.com/watch?v=Q0oe6i7d1u4) (video)
+* [What is an IPFS Pinning Service?](https://medium.com/pinata/what-is-an-ipfs-pinning-service-f6ed4cd7e475) (Pinata explainer)
+* [IPFS documentation: Persistence, permanence and pinning](https://docs.ipfs.tech/concepts/persistence/)
+* [Developing on Filecoin](https://www.youtube.com/watch?v=aGCpq0Xf-w8) (video)
+* Textile tools: [video](https://www.youtube.com/watch?v=IZ8M9m9\_uJY) and [blog post](https://blog.textile.io/developer-tools-for-filecoin-ipfs-web/)
+* [Building decentralized apps using Fleek’s Space daemon](https://www.youtube.com/watch?v=pWJ5fty-7mA) (video)
diff --git a/smart-contracts/fundamentals/erc-20-quickstart.md b/smart-contracts/fundamentals/erc-20-quickstart.md
new file mode 100644
index 000000000..c0de6d8e7
--- /dev/null
+++ b/smart-contracts/fundamentals/erc-20-quickstart.md
@@ -0,0 +1,201 @@
+---
+description: >-
+ In this quickstart tutorial we’ll walk through how to deploy your first
+ smart-contract to the Filecoin network.
+---
+
+# ERC-20 quickstart
+
+We’re going to install a browser-based wallet called MetaMask, create a new wallet address, supply some test currency to that wallet, and then use a browser-based development environment called Remix to deploy a smart contract to the Filecoin network. We’re going to be creating an ERC-20 token in this quickstart. The ERC-20 contract is used a lot in representing a massive array of tokens across multiple blockchains, primarily the Ethereum blockchain.
+
+{% hint style="info" %}
+If you’re an Ethereum developer, check out the [FEVM Hardhat kit](../developing-contracts/hardhat.md).
+{% endhint %}
+
+## Accounts and assets
+
+We’re going to be using MetaMask, a cryptocurrency wallet that lives in your browser making it very easy for users to interact with web3-based sites!
+
+### Create a wallet
+
+Before we can interact with the Filecoin network, we need funds. But before we can get any funds, we need somewhere to put them!
+
+1. Open your browser and visit the [MetaMask website](https://metamask.io/).
+2. Install the wallet by clicking the **Download for** button. MetaMask is available for Brave, Chrome, Edge, Firefox, and Opera.
+3. Once you have installed MetaMask, it will open a **Get started** window.
+
+ 
+4. Click **Create a new wallet**.
+5. Enter a password to secure your MetaMask wallet. You will need to enter this password every time you use the wallet.
+
+ 
+6. Follow the prompts until you get to the **Secret Recovery Phrase** window. Read the information about what this _recovery phrase_ is on this page.
+7. Eventually you should get to the _Wallet creation success_ page!
+
+ 
+8. Once you’ve done that, you should have your account set up!
+
+ 
+
+### Switch networks
+
+You may notice that we are currently connected to the **Ethereum Mainnet**. We need to point MetaMask to the Filecoin network, specifically the [Calibration testnet](../../networks/calibration/). We’ll use a website called [chainlist.network](https://chainlist.network) to give MetaMask the information it needs quickly.
+
+1. Go to [chainlist.network](https://chainlist.network).
+2. Enable the **Testnets** toggle and enter `Filecoin` into the search bar.
+
+ 
+3. Scroll down to find the **Filecoin – Calibration** **testnet**.
+4. In MetaMask click **Next**.
+
+ 
+5. Click **Connect.**
+6. Click **Approve** when prompted to _Allow this site to add a network._
+7. Click **Switch network** when prompted by MetaMask.
+8. Open MetaMask from the browser extensions tab:
+
+ 
+9. You should see the _Filecoin Calibration_ testnet listed at the top.
+
+Nice! Now we’ve got the Filecoin Calibration testnet set up within MetaMask. You’ll notice that our MetaMask window shows `0 TFIL`. Test-filecoin (`TFIL`) is `FIL` that has no value in the _real world_, and developers use it for testing. We’ll grab some `TFIL` next.
+
+### Get some funds
+
+1. In your browser, open MetaMask and copy your address to your clipboard:
+
+ 
+2. Go to [faucet.calibration.fildev.network](https://faucet.calibration.fildev.network/) and click **Send Funds.**
+3. Paste your address into the address field, and click **Send Funds**.
+4. The faucet will show a transaction ID. You can copy this ID into a Calibration testnet [block explorer](../../networks/calibration/explorers.md) to view your transaction. After a couple of minutes, you should see some `tFIL` transferred to your address.
+
+That’s all there is to it! Getting `tFil` is easy!
+
+## Contract creation
+
+The development environment we’re going to be using is called Remix, viewable at [remix.ethereum.org](https://remix.ethereum.org/). Remix is an incredibly sophisticated tool, and there’s a lot you can play around with! In this tutorial however, we’re going to stick to the very basics. If you want to learn more, check out [the Remix documentation](https://remix-ide.readthedocs.io/en/latest/).
+
+### Create a workspace
+
+In Remix, workspaces are where you can create a contract, or group of contracts, for each project. Let’s create a new workspace to create our new ERC-20 token.
+
+1. Open [remix.ethereum.org](https://remix.ethereum.org).
+2. Click the `+` icon next to **Workspaces** to create a new workspace:
+
+ 
+3. In the **Choose a template** dropdown, select **ERC 20**.
+4. Select the **Mintable** checkbox.
+5. Enter a fun name for your token in the **Workspace name** field. Something like `CorgiCoin` works fine.
+6. Click **OK** to create your new workspace.
+
+ 
+
+### Customize the contract
+
+The contract template we’re using is pretty simple. We just need to modify a couple of variables.
+
+1. Under the **contract** directory, click **MyToken.sol**.
+
+ 
+2. In the editor panel, replace `MyToken` with whatever you’d like to name your token. In this example, we’ll use `CorgiCoin`.
+
+ 
+3. On the same line, replace the second string with whatever you want the symbol of your token to be. In this example, we’ll use `CRG`.
+
+ 
+
+That’s all we need to change within this contract. You can see on line 4 that this contract is importing another contract from `@openzeppelin` for us, meaning that we can keep our custom token contract simple.
+
+### Compile
+
+1. Click the green play symbol at the top of the workspace to compile your contract. You can also press `CMD` + `s` on MacOS or `CTRL` + `s` on Linux and Windows.
+
+ 
+2. Remix automatically fetches the two `import` contracts from the top of our `.sol` contract. You can see these imported contracts under the `.deps` directory. You can browse the contracts there, but Remix will not save any changes you make.
+
+ 
+
+### Deploy
+
+Now that we’ve successfully compiled our contract, we need to deploy it somewhere! This is where our previous MetaMask setup comes into play.
+
+1. Click the **Deploy** tab from the left.
+
+ 
+2. Under the **Environment** dropdown, select **Injected Provider - MetaMask**.
+
+ 
+3. MetaMask will open a new window confirming that you want to connect your account to Remix.
+4. Click **Next**:
+
+ 
+5. Click **Connect** to connect your `tFIL` account to Remix.
+
+ 
+6. Back in Remix, under the **Account** field, you’ll see that it says something like `0x11F... (5 ether)`. This value is 5 `tFIL`, but Remix doesn’t support the Filecoin network so doesn’t understand what `tFIL` is. This isn’t a problem, it’s just a little quirk of using Remix.
+
+ 
+7. Under the **Contract** dropdown, ensure the contract you created is selected.
+
+ 
+8. Click **Deploy**.
+
+ 
+9. MetaMask will open a window and ask you to confirm the transaction. Scroll down and click **Confirm** to have MetaMask deploy the contract.
+10. Back in Remix, a message at the bottom of the screen shows that the creation of your token is pending.
+
+ 
+11. Wait around 90 seconds for the deployment to complete.
+
+ 
+
+On the Filecoin network, a new set of blocks, also called a tipset, is created every thirty seconds. When deploying a contract, the transaction needs to be received by the network, and then the network needs to confirm the contract. This process takes around one to two tipsets to process – or around 60 to 90 seconds.
+
+## Use your contract
+
+Now that we’ve compiled and deployed the contract, it’s time to actually interact with it!
+
+### Mint your tokens
+
+Let’s call a method within the deployed contract to mint some tokens.
+
+1. Back in Remix, open the **Deployed Contracts** dropdown, within the **Deploy** sidebar tab.
+
+ 
+2. Expand the `mint` method. You must fill in two fields here: `to` and `amount`.
+
+ 
+3. The `to` field specifies which address you want these initial tokens sent to. Open MetaMask, copy your address, and paste it into this field.
+
+ 
+4. This field expects an `attoFil` value. 1 `FIL` is equal to 1,000,000,000,000,000,000 `attoFil`. So if you wanted to mint 100 `FIL`, you would enter `100` followed by 18 zeros: `100000000000000000000`.
+5. Click **Transact**.
+
+ 
+6. MetaMask will open a window and ask you to confirm the transaction:
+
+ 
+
+Again, you must wait for the network to process the transaction, which should take about 90 seconds. You can move on to the next section while you’re waiting.
+
+### Add to MetaMask
+
+Currently, MetaMask has no idea what our token is or what it even does. We can fix this by explicitly telling MetaMask the address of our contract.
+
+1. Go back to Remix and open the **Deploy** sidebar tab.
+2. Under **Deployed Contracts**, you should see your contract address at the top. Click the copy icon to copy the address to your clipboard:
+
+ 
+3. Open MetaMask, select **Assets**, and click **Import your tokens**:
+
+ 
+4. In the **Token contract address** field, paste the contract address you just copied from Remix and then click **Add custom token**. MetaMask should autofill the rest of the information based on what it can find from the Filecoin network.
+
+ 
+5. Click **Import token**:
+6. You should now be able to see that you have 100 of your tokens within your MetaMask wallet!
+
+ 
+
+### Share your tokens
+
+Having a bunch of tokens in your personal MetaMask is nice, but why not send some tokens to a friend? Your friend needs to create a wallet in MetaMask as we did in the [Create a wallet](erc-20-quickstart.md#create-a-wallet) and [Switch networks](erc-20-quickstart.md#switch-networks) sections. They will also need to import your contract deployment address like you did in the [Add to MetaMask](erc-20-quickstart.md#add-to-metamask) section. Remember, you need to pay gas for every transaction that you make! If your friend tries to send some of your tokens to someone else but can’t, it might be because they don’t have any `tFil`.
diff --git a/smart-contracts/fundamentals/faqs.md b/smart-contracts/fundamentals/faqs.md
new file mode 100644
index 000000000..546e3c3f6
--- /dev/null
+++ b/smart-contracts/fundamentals/faqs.md
@@ -0,0 +1,118 @@
+---
+description: >-
+  A list of frequently asked questions about FVM, FEVM and how to build on the
+ Filecoin network.
+---
+
+# FAQs
+
+Here’s a collection of general FAQs that the team has gathered. If you are looking for more technical FAQs, please head to [Filecoin Community Discussion](https://github.com/filecoin-project/community/discussions/categories/q-a).
+
+## **What is FVM**
+
+The FVM (Filecoin virtual machine) enables developers to write and deploy custom code to run on top of the Filecoin blockchain. This means developers can create apps, markets, and organizations built around data stored on Filecoin.
+
+## **What broader implications does FVM have**
+
+FVM allows us to think about data stored on Filecoin differently. Apps can now build a new layer on the Filecoin network to enable trading, lending, data derivatives, and decentralized organizations built around datasets.
+
+## **What problems does FVM solve**
+
+FVM can create incentives to solve problems that Filecoin participants face today around data replication, data aggregation, and liquidity for miners. Beyond these, there is a long tail of data storage and retrieval problems that will also be resolved by user programmability on top of Filecoin.
+
+## **How does Aptos compare to FVM**
+
+[Aptos](https://aptoslabs.com/) is a Move-based L1 chain, whereas FVM is a WASM runtime on the Filecoin chain. The latter comes with an EVM right out of the box; the former does not. The FVM also supports programmable storage with deals on Filecoin.
+
+## **How does the FVM directly interact with data on Filecoin**
+
+The FVM operates on blockchain state data — it does _not_ operate on data stored in the Filecoin network. This is because access to that data depends on network requests, an unsealed copy’s availability, and the SPs’ availability to supply that data.
+
+Access and manipulation of data stored in the network will happen via L2 solutions, for example, retrieval networks or compute-over-data networks, e.g., Saturn or CoD.
+
+## **How do other EVMs compare to FEVM**
+
+Unlike other EVM chains, FEVM specifically allows you to write contracts that orchestrate programmable storage. This means contracts that can coordinate storage providers, data health, perpetual storage mechanisms, and more. Other EVM chains do not have direct access to Filecoin blockchain state data.
+
+## **What is an actor**
+
+An actor is code that the Filecoin virtual machine can run. Actors are also referred to as smart contracts.
+
+## **What are built-in actors**
+
+[Built-in actors](https://github.com/filecoin-project/builtin-actors) are code that comes precompiled into the Filecoin clients and can be run using the FVM. They are similar to [Ethereum precompiles](https://www.evm.codes/precompiled?fork=merge).
+
+## **Why use the FEVM vs any other EVM compatible chain**
+
+Storage contracts are available as a native primitive open to smart contract developers, which reduces the cost of writing to storage from an EVM smart contract compared with using a separate storage service.
+
+## **Why FEVM vs native FVM**
+
+FEVM allows Solidity developers to easily write/port actors to the FVM using the tools that have already been introduced in the Ethereum ecosystem.
+
+## **What applications make FVM/FEVM unique**
+
+Applications that natively make use of storage contracts. Perpetual storage contracts, Data DAOs, etc.
+
+## **What is perpetual storage**
+
+Perpetual storage is a unique actor design paradigm only available on the FVM that allows users the ability to renew Filecoin storage deals and to keep them active indefinitely. This could be achieved by using a Decentralized Autonomous Organization (DAO) structure for example.
+
+## **What are Data DAOs**
+
+Data DAOs are a unique design paradigm FVM developers could create which use Filecoin storage to store all their data instead of a service like AWS (which is currently used).
+
+## **Is FVM part of Filecoin clients like Lotus**
+
+Yes.
+
+## **Do I have to install Lotus to work with FVM**
+
+Not necessarily. You can use any of the public RPC nodes on either [mainnet](../../networks/mainnet/) or the [Calibration testnet](../../networks/calibration/):
+
+* `api.hyperspace.node.glif.io/rpc/v1`
+* `api.zondax.ch/fil/node/hyperspace/rpc/v1`
+
+## **What is the difference between the FVM and Bacalhau**
+
+They are synergistic. Compute over data solutions such as [Bacalhau](https://github.com/filecoin-project/bacalhau) can use the FVM.
+
+## **Why does the FVM use WASM**
+
+Many [different languages](https://github.com/appcypher/awesome-wasm-langs) already compile to WASM so developers can pick their favorite.
+
+## **Is the FEVM a bridge to the EVM**
+
+No, the FEVM is its own instance of the EVM built on top of Filecoin. You will need to redeploy smart contracts that exist in the EVM to the FEVM. However, bridges can be built on top of the FEVM which connect it to other blockchains.
+
+## **How is the Filecoin network accessed through Solidity**
+
+When an EVM is deployed to FEVM, it is compiled with WASM and an actor instance is created in FEVM that runs the EVM bytecode. The user-defined FEVM actor is then able to interact with the Filecoin network via built-in actors like the Market and Miner APIs.
+
+## **Can I deploy EVM bytecode to the native FVM**
+
+No, it must be deployed to the FEVM.
+
+## **What frontend framework should I use?**
+
+React with Ethers.js or web3.js works well.
+
+## **How do we convert from msg.sender in a FEVM contract, which returns an EVM `0x` address, to the underlying Filecoin `f` address?**
+
+You can use the npm [`@glif/filecoin-address`](https://www.npmjs.com/package/@glif/filecoin-address) package or the [Zondax mock API](https://github.com/Zondax/fevm-solidity-mock-api) has the constructor that calls `mock_generate_deals();`.
+
+## **How do I bound the replicator factor from solidity FEVM?**
+
+Store a number limit on running `DealClient` and `publish_deal` and have it authorized to replicate.
+
+## **How can I use FVM to store data to Filecoin**
+
+The intent of FEVM/FVM is to compute over state data (the metadata of your stored data). Storage providers are the ones that are able to store your data and upload the deal to the Filecoin network. Data retrieval happens via Retrieval Providers, accepting the client’s ask to retrieve and working with storage providers to decrypt the data to deliver to the client. FEVM/FVM is able to build logic around these 2 processes and automate, add verification and proofs, time-lock retrievals etc.
+
+## **How do I close a storage deal on Filecoin and stop storage providers (SP) from storing my data on-chain**
+
+It’s not impossible but storage providers are incentivized not to close the storage deal as they are slashed for not providing [Proof of Spacetime (PoSt)](../../reference/general/glossary.md#proof-of-spacetime-post). Someone has to pay for the broken promise a miner makes to the chain and you need a custom market actor for it most likely to make the deal. You need to make deals for a certain amount of time - right now the boundaries are 6-18 months. You cannot ask a storage provider to take down your data without contacting them off-chain.
+
+## **How do I check a storage provider’s balance with their FEVM address**
+
+You can query balance of any address using the Zondax [Account API](https://docs.zondax.ch/openapi).
diff --git a/smart-contracts/fundamentals/filecoin-evm-runtime.md b/smart-contracts/fundamentals/filecoin-evm-runtime.md
new file mode 100644
index 000000000..ac258a5ad
--- /dev/null
+++ b/smart-contracts/fundamentals/filecoin-evm-runtime.md
@@ -0,0 +1,33 @@
+---
+description: >-
+ This page details what exactly EVM compatibility means for the FVM, and any
+ other information that Ethereum developers may need to build applications on
+ Filecoin.
+---
+
+# Filecoin EVM runtime
+
+The Ethereum Virtual Machine is an execution environment initially designed, built for, and run on the Ethereum blockchain. The EVM was revolutionary because, for the first time, any arbitrary code could be deployed to and run on a blockchain. This code inherited all the decentralized properties of the Ethereum blockchain. Before the EVM, a new blockchain had to be created with custom logic and then bootstrapped with validators every time a new type of decentralized application needed to be built.
+
+Code deployed to EVM is typically written in the high-level language Solidity, although other languages, such as Vyper, exist. The high-level Solidity code is compiled to EVM bytecode which is what is actually deployed to and run on the EVM. Due to it being the first virtual machine to run on top of a blockchain, the EVM has developed one of the strongest developer ecosystems in Web3 to date. Today, many different blockchains run their own instance of the EVM to allow developers to easily port their existing applications into the new blockchain’s ecosystem.
+
+## Ethereum Virtual Machine
+
+The Filecoin EVM, often just referred to as _FEVM_, is the Ethereum virtual machine virtualized as a runtime on top of the Filecoin virtual machine. It allows developers to port any existing EVM-based smart contracts straight onto the FVM. The Filecoin EVM runtime is completely compatible with any EVM development tools, such as Hardhat, Brownie, and MetaMask, making deploying and interacting with EVM-based actors easy! This is because Filecoin nodes offer the Ethereum JSON-RPC API.
+
+## FEVM and native FVM
+
+Once [Milestone 2.2 of the FVM roadmap](roadmap.md) is complete, developers will have the option to deploy actors on either the FEVM or native FVM, or both if they really want to. But which should you choose? The decision can be summed up as such: if you want better performance, write actors that are compiled to WASM and deployed to native FVM. If you are familiar with Solidity and want access to the EVM ecosystem of tools, but don’t mind less performance, deploy to the FEVM. See the pros and cons of each below:
+
+| | FVM | FEVM |
+| -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- |
+| **Pros** | Native execution speed and performance on Filecoin (i.e., less gas cost per unit of actor code executed). Write actors in any language that compiles to WASM. | Take advantage of current Solidity and EVM tooling to quickly port or write actors. |
+| **Cons** | Tooling is not yet as mature as EVM tooling. | Higher gas fees and lower performance due to the virtualization overhead of the FEVM. |
+
+In both cases, you have access to all the awesome power of the Filecoin blockchain, including storage contracts as a native primitive!
+
+## Deep dive
+
+For a deeper dive into the concepts discussed on this page, see this presentation on the Ethereum compatibility of the FVM:
+
+{% embed url="https://www.youtube.com/watch?v=lgUMVhM3FIM" %}
diff --git a/smart-contracts/fundamentals/roadmap.md b/smart-contracts/fundamentals/roadmap.md
new file mode 100644
index 000000000..f12f25a13
--- /dev/null
+++ b/smart-contracts/fundamentals/roadmap.md
@@ -0,0 +1,94 @@
+---
+description: >-
+ The FVM project has come a long way in an incredibly short amount of time.
+ This is the roadmap for FVM features for the Filecoin network.
+---
+
+# Roadmap
+
+## Goal
+
+The goal of the FVM project is to add general programmability to the Filecoin blockchain. Doing so will give developers all kinds of creative options, including:
+
+* Orchestrating storage.
+* Creating L2 networks on top of the Filecoin blockchain.
+* Providing new incentive structures for providers and users.
+* Frequently verifying that providers are storing data correctly.
+* Automatically finding which storage providers are storing what data.
+* Many more data-based applications.
+
+Filecoin was the first network to deploy programmability post-genesis, ensuring that layer 0 of the Filecoin blockchain was stable and fully functional. Due to the large amounts of capital already secured within the Filecoin network, the development of the FVM needs to be careful and gradual.
+
+## Roadmap
+
+The FVM roadmap is split into three initiatives:
+
+* Milestone 1: Initialize the project and allow built-in actors to run on the FVM.
+* Milestone 2: Enable the deployment of Ethereum virtual machine (EVM) compatible smart contracts onto the FVM. Also, allow developers to create and deploy their own native actors to the FVM.
+* Milestone 3: Continue to enhance programmability on FVM.
+
+### ✅ Milestone 0
+
+**✅ Lotus mainnet canaries with FVM support**
+
+_Completed in February 2022_
+
+The reference FVM implementation has been integrated into a fork of Lotus (the Filecoin reference client). A fleet of canary nodes have been launched on mainnet, running WASM-compiled built-in actors on the FVM. The canaries are monitored for consensus faults and to gather telemetry. This milestone is a testing milestone that’s critical to collect raw execution data to feed into the overhaul of the gas model, in preparation for user-programmability. It implies no network upgrade.
+
+### ✅ Milestone 0.5
+
+**✅ Ability to run FVM node and sync mainnet**
+
+_Completed in March 2022_
+
+Any node operator can sync the Filecoin Mainnet using the FVM and Rust built-in actors, integrated in Lotus, Venus, Forest, and Fuhon implementations. It implies no network upgrade.
+
+### ✅ Milestone 1
+
+**✅ Introduction of non-programmable WASM-based FVM**
+
+_Completed in May 2022_
+
+Mainnet will atomically switch from the current legacy virtual machines to the WASM-based reference FVM. A new gas model will be activated that accounts for actual WASM execution costs. Only Rust built-in actors will be supported at this time. This milestone requires a network upgrade.
+
+**✅ Network Version 17 (nv17): Initial protocol refactors for programmability**
+
+_Completed in November 2022_
+
+An initial set of protocol refactors targeting built-in actors, including the ability to introduce new storage markets via user-defined smart contracts.
+
+### ✅ Milestone 2.1
+
+**✅ Ability to deploy EVM contracts to mainnet (FEVM)**
+
+_Completed in March 2023_
+
+The Filecoin network will become user-programmable for the first time. Developers will be able to deploy smart contracts written in Solidity or Yul, and compiled to EVM. Smart contracts will be able to access Filecoin functionality by invoking built-in actors. Existing Ethereum tooling will be compatible with Filecoin. This milestone requires a network upgrade.
+
+**✅ Hyperspace testnet goes live**
+
+_Completed on January 16th 2023_
+
+A new stable developer testnet called Hyperspace will be launched as the pre-production testnet. The community is invited to participate in heavy functional, technical, and security testing. Incentives and bounties will be available for developers and security researchers.
+
+**✅ FEVM goes live on mainnet**
+
+_Completed on March 14th 2023_
+
+The Filecoin EVM runtime is deployed on Filecoin mainnet via the [Filecoin nv18 Hygge upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-4313888).
+
+### 🔄 Milestone 2.2
+
+**🔄 Ability to deploy Wasm actors to mainnet**
+
+_To complete midway through 2023_
+
+Developers will be able to deploy custom smart contracts written in Rust, AssemblyScript, or Go, and compiled to WASM bytecode. SDKs, tutorials, and other developer materials will be generally available. This milestone requires a network upgrade.
+
+### 🔮 Milestone 3+
+
+**🔮 Further incremental protocol refactors to enhance programmability**
+
+_To complete in 2023_
+
+A series of additional incremental protocol upgrades (besides nv17) to move system functionality from privileged space to user space. The result will be a lighter and less opinionated base Filecoin protocol, where storage markets, deal-making, incentives, etc. are extensible, modular, and highly customizable through user-deployed actors. Enhanced programming features such as user-provided cron, asynchronous call patterns, and more will start to be developed at this stage.
diff --git a/smart-contracts/fundamentals/support.md b/smart-contracts/fundamentals/support.md
new file mode 100644
index 000000000..018000c4c
--- /dev/null
+++ b/smart-contracts/fundamentals/support.md
@@ -0,0 +1,23 @@
+---
+description: >-
+ If you need assistance while exploring the Filecoin virtual machine, you can
+ reach out to the team and community using the links on this page.
+---
+
+# Support
+
+## Slack
+
+Like many other distributed teams, the FVM team works mostly on Slack. You can join the Filecoin Project Slack for free by going to [`filecoin.io/slack`](https://filecoin.io/slack/). The FVM team hangs out in the following channels:
+
+* [`#fil-builders`](https://filecoinproject.slack.com/archives/CRK2LKYHW) for building solutions on FVM and Filecoin
+* [`#fil-fvm-dev`](https://filecoinproject.slack.com/archives/C029MT4PQB1) for development of the FVM
+* [`#fvm-docs`](https://filecoinproject.slack.com/archives/C03MDFERKMJ) for FVM documentation
+
+## Forum
+
+If you just need a general pointer or are looking for technical FAQs, you can head over to the [FVM GitHub Discussion tab](https://github.com/filecoin-project/community/discussions/categories/developers).
+
+## Developer grants
+
+The [Filecoin Grant Platform](https://github.com/filecoin-project/devgrants) connects grant makers with builders and researchers in the Filecoin community. Whether you represent a foundation that wants to move the space forward, a company looking to accelerate development on the features your application needs, or a developer team itching to hack on the FVM, [take a look at the supported grant types and available opportunities →](https://github.com/filecoin-project/devgrants)
diff --git a/smart-contracts/fundamentals/the-fvm.md b/smart-contracts/fundamentals/the-fvm.md
new file mode 100644
index 000000000..5041e357d
--- /dev/null
+++ b/smart-contracts/fundamentals/the-fvm.md
@@ -0,0 +1,224 @@
+---
+description: >-
+ The Filecoin virtual machine (FVM) is a runtime environment for smart
+ contracts on the Filecoin network. These smart contracts, also called actors,
+ can be written in Solidity.
+---
+
+# The FVM
+
+## Features
+
+The Filecoin virtual machine (FVM) was created to enable developers to build new use cases on top of the Filecoin network. Data access control, data DAOs, perpetual storage, collateral leasing, and Ethereum-compatible fungible and non-fungible tokens are just some of the use-cases for the FVM.
+
+### Data access control
+
+The FVM allows Filecoin network participants to limit individual access to certain data sets, an advantage previously only available using centralized storage solutions.
+
+### Data DAO
+
+FVM data access control enables the creation and management of data centered decentralized-autonomous-organizations, which can govern and monetize data access and pool returns into a shared treasury.
+
+### Perpetual storage
+
+The FVM allows users to store data permanently, managed by repair and replication bots, which also benefit from Filecoin’s verifiable storage proofs.
+
+### Leasing
+
+FIL token holders can use their holdings to provide storage collateral and receive leasing fees. Community-generated reputation scores enable everyone to identify good borrowers.
+
+### Ethereum compatibility
+
+The FVM is fully EVM-compatible, allowing new ERC-20 tokens to be launched on the Filecoin network and enabling layer two networks to leverage its storage power and security. The majority of EVM tooling can also be used within the Filecoin ecosystem.
+
+## Use-cases
+
+The FVM can be used for the creation of a new class of web3 dApps, many of which will have the potential to become 10x improvements to the network’s functionality and beyond. The FVM team and members of the Filecoin community have discussed what can be built with the FVM. Some ideas are:
+
+### Tokenized datasets and Data DAOs
+
+Kickstart the dataset economy by tokenizing datasets and representing the value of those datasets to society. Exchange those data tokens between peers and request computation services on that data, such as validation, joins, analysis, feature detection, and extraction.
+
+### Trustless reputation systems
+
+Imagine an overlay network of nodes that could patrol the network performing random deals with storage providers (SPs) in order to gauge key metrics like quality of service guarantees, performance, latency, and region details. These nodes can then store SP reputation scores on-chain, making them traceable and verifiable while enabling users to decide on the terms of service of the SPs that they want to use for their data.
+
+### Replication workers
+
+Imagine anyone is able to write a new smart contract that makes new deals to maintain a specific level of replication of that dataset in the network. You could ensure the resiliency of your data by ensuring it is always stored n times automatically.
+
+The smart contract could also be able to transfer your data just once into the Filecoin network and have a trustless actor replicate and send that n times to multiple storage locations. You could even create a user-defined policy of rules for that data to obey - specifying things like region and location, latency, and even price. This can all be built right into the smart contract flow in FVM.
+
+### Smarter storage markets
+
+Imagine richer functionality in storage markets with features like auto-renewal of deals or self-repairing deals in the event of sector issues. Other possibilities are time-locked or event-driven data deals where retrieval only occurs under a specified time frame or event trigger.
+
+### The list goes on
+
+There are many more use cases to unlock with FVM. Some other projects include:
+
+* NFTs minted, exchanged, and stored under a single roof.
+* Storage bounties and auction mechanisms.
+* Enabling L2 bridges.
+* Futures and derivatives on storage that compose in DeFi fashion.
+* Conditional leases for sector pledging.
+
+If you have a great idea or suggestion, join the discussion on the [FVM forum](https://fvm-forum.filecoin.io).
+
+## Project blueprints
+
+Here is a collection of blueprint examples that developers can use to design and create their projects on Filecoin.
+
+### Data DAO solution
+
+A Data DAO enables the creation of a dataset economy where users can capture and represent the value of those datasets to society. It’s even possible to exchange those data tokens between peers and request computation services on that data, such as validation, joins, analysis, feature detection, and extraction.
+
+There are many ways to create a Data DAO. This document will only focus on one of the possibilities for the purpose of example.
+
+As the [RFS](https://rfs.fvm.dev/) describes, Data DAOs enable groups of people to put together resources to preserve and utilize the data that are useful for their stakeholders. Imagine a Data DAO that can mint a token, $DATA, and incentivize storage providers to replicate the data it wants to store. The Data DAO can specify the data it wants to replicate and the number of replications it desires. For every replication, the Data DAO will mint some $DATA and send them to the SP as rewards. How datasets are chosen is left up to the governance process of the Data DAO.
+
+#### **Solution Architecture**
+
+I highly recommend that you read through the [“Core Idea” section in this README](https://github.com/lotus-web3/client-contract) before continuing to read this document.
+
+**Role management**
+
+The contract has to specify the admin of the Data DAO either during the creation of the contract or through a permission change contract call.
+
+**ERC20 token**
+
+The Data DAO should be the minter of a standard ERC20 token $DATA and have the ability to mint $DATA. For example, a [ERC20PresetMinterPauser contract](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/presets/ERC20PresetMinterPauser.sol) from OpenZeppelin can be used.
+
+**Publish a deal to the Data DAO**
+
+Storage providers should seal the data and publish the deal information to the market actor by calling `publish_deal` on the market actor. The Data DAO will act as the client of the deal. (The command to seal the data and generate deal information are under development and will be updated here when more information is available) ([`publish_deal` is called `publish_storage_deals` in the mock solidity API](https://github.com/Zondax/fevm-solidity-mock-api/blob/master/contracts/v0.8/MarketAPI.sol#L170)).
+
+The market actor will call the [AuthenticateMessage](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0044.md) native method on the Data DAO contract to know if this deal should be created. This method will be called using the FRC42 method number as specified in the linked FRC.
+
+You can handle this callback by exposing a `handle_filecoin_method(uint64, uint64, bytes)` Solidity method, which is how the FEVM runtime routes inbound FRC42 calls. See [this example](https://github.com/lotus-web3/client-contract/blob/8b53caadd9f7b028f897dfcd28ec2ca9ae98b9e3/src/DealClient.sol#LL49).
+
+The Data DAO contract should check if the deal was published according to its business logic. For our example:
+
+* If the SP has an `admin` role, all the deals created by the SP should be accepted, and the Data DAO contract should start tracking the proposal and the number of replications of this CID
+* If the CID of the deal does not have enough replications, the Data DAO contract should allow the creation of this deal
+* The Data DAO should reject other deals, not in the above cases
+
+The Data DAO contract should mint some $DATA and send it to the storage provider who successfully published the deal.
+
+
+
+**Retrieve the information and data from the Data DAO**
+
+* The Data DAO contract should have a method that provides all the deals managed by it.
+* The Data DAO contract should have a method that provides all the CIDs it wants to replicate and the current number of replications, and the desired number of replications.
+ * It should have a mechanism to refresh the number of replications based on the `start_epoch` and `end_epoch` attributes of each deal it manages.
+* Users can retrieve the data of the CID by using the `lotus client retrieve` command.
+
+#### **Possible future directions**
+
+Instead of letting contract admins decide which CIDs to preserve, the Data DAO contract can implement different mechanisms to decide what to store. For example, the contract can let users vote on what to store, or they can let $DATA holders vote.
+
+The Data DAO contract can decide how to incentivize SPs by implementing their business logic about how to distribute $DATA or introduce tokenomics such as staking.
+
+### Perpetual Storage
+
+There are many use cases in the world that need perpetual storage. For example, the safe and indefinite storage of NFTs would greatly assist the NFT marketplace.
+
+Filecoin deals have an expiration date attached to them, and after the expiration date, deals expire, and data is lost. With the FVM, uploaders can specify the number of replications they want and the desired expiration date. The expiration date can be a long time in the future or even indefinitely. As long as the uploader still has funds (FIL) in the contract account, the contract will keep incentivizing storage providers to create deals to meet the goal of replication.
+
+#### **Solution architecture**
+
+I highly recommend that you [read through the “Core Idea” section in this README](https://github.com/lotus-web3/client-contract) before continuing to read this document.
+
+
+
+**Deposit funds**
+
+* Uploaders can deposit funds into the PerpetualStorage contract
+
+**PerpetualDeal creation**
+
+* The uploader should upload the file to a place where storage providers can download the data from, such as IPFS or AWS S3, and get the URL of the data
+* The uploader should send the URL of the data, the desired number of replication, and the desired expiration date to the PerpetualStorage contract to create a PerpetualDeal
+ * The dApp front end can provide an estimation of how long the data can be stored based on the amount of FIL the uploader has in the contract.
+ * The contract should reject the creation of the PerpetualDeal if the uploader does not have enough funds deposited in the contract.
+* The PerpetualStorage contract should create and update PerpetualDealAd based on the storage situation of each PerpetualDeal.
+ * The PerpetualStorage contract should determine the FIL it wants to give out for each PerpetualDealAd based on its business logic. For example, it can provide more bonuses if the PerpetualDeal does not have many replications or if its storage deals are about to expire.
+
+**PerpetualDeal information**
+
+* The PerpetualStorage contract should provide an interface for storage providers to query the information about PerpetualDeals, including the URL of the data, the desired expiration date, the current number of replication, and the storage deals created by other storage providers.
+* The PerpetualStorage contract should provide an interface for storage providers to query PerpetualDealAd.
+* The PerpetualStorage contract should provide an interface for storage providers to query the funds that the uploader has put into the PerpetualStorage contract.
+
+**Storage deals creation**
+
+* Storage providers can look at the list of PerpetualDealAd and determine which PerpetualDealAd they want to store.
+* Storage providers should download the content of the PerpetualDealAd. They should try to download from the URL of the content or use the CID to download the content from other storage providers.
+* Storage providers should seal the data of the PerpetualDealAd and publish the deal information to the market actor by calling `publish_deal` on the market actor. The PerpetualStorage contract will act as the client of the deal. (The command to seal the data and generate deal information is under development and will be updated here when more information is available) ([`publish_deal` is called `publish_storage_deals` in the mock solidity API](https://github.com/Zondax/fevm-solidity-mock-api/blob/master/contracts/v0.8/MarketAPI.sol#L170)).
+ * The storage provider should put the PerpetualDealAd’s id into the label (this is the field used to store arbitrary data) of the deal, so the PerpetualStorage contract can identify which PerpetualDealAd the storage provider is targeting.
+ * The market actor will call the [AuthenticateMessage](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0044.md) native method on the PerpetualStorage contract to know if this deal should be created. This method will be called using the FRC42 method number as specified in the linked FRC.
+ * You can handle this callback by exposing a `handle_filecoin_method(uint64, uint64, bytes)` Solidity method, which is how the FEVM runtime routes inbound FRC42 calls. [See this example](https://github.com/lotus-web3/client-contract/blob/8b53caadd9f7b028f897dfcd28ec2ca9ae98b9e3/src/DealClient.sol#LL49).
+ * The PerpetualStorage contract should check if this replication is valid and send FIL to the storage provider if it successfully creates the deal.
+
+### Collateral Leasing
+
+Storage providers (SPs) have to post collateral (in FIL) to onboard storage capacity to the network and to accept storage deals. This collateral incentivizes the storage provider to behave correctly by presenting timely proofs of the health of the data (PoRep, PoSt), or they risk getting slashed.
+
+While important, the need to pledge collateral creates friction and an immediate barrier that throttles SP participation and growth. On the other hand, the Filecoin network has a large base of long-term token holders that would like to see the network grow, and are willing to lend their FIL to reputable and growth-oriented SPs.
+
+Collateral leasing can solve this issue. Storage providers can lease FIL collateral from token holders, and the smart contract will lock the future income (block rewards) until the storage providers have repaid their leased FIL.
+
+#### **Required addresses**
+
+**Owner address**
+
+* Income and returned collateral are paid to this address.
+* This address is also allowed to change the worker address for the miner
+* `change_owner_address` method can change the owner address
+
+**Beneficiary address**
+
+* Beneficiary is an entity the owner can declare that is allowed to withdraw some of the storage provider's FIL in available balance (as opposed to locked collateral)
+* There is a [BeneficiaryTerm](https://github.com/Zondax/fevm-solidity-mock-api/blob/97d1c578c2787868ac5fdd1de46ed9c4cd11cc97/contracts/v0.8/typeLibraries/CommonTypes.sol#L75) that indicates:
+ * how much the beneficiary can withdraw.
+ * the expiration date.
+ * how much the beneficiary has withdrawn.
+* The Beneficiary is set to the same address as the owner when first creating a miner without specifying a beneficiary address.
+* [Get\_beneficiary method](https://github.com/Zondax/fevm-solidity-mock-api/blob/97d1c578c2787868ac5fdd1de46ed9c4cd11cc97/contracts/v0.8/MinerAPI.sol#L64) can return current beneficiary information.
+* [ChangeBeneficary method](https://github.com/Zondax/fevm-solidity-mock-api/blob/97d1c578c2787868ac5fdd1de46ed9c4cd11cc97/contracts/v0.8/MinerAPI.sol#L52) can specify a new beneficiary.
+
+**Worker address**
+
+#### **Collateral leasing solution architecture**
+
+**Deposit method**
+
+* The FIL token holder can call a `deposit` method on the LendingMarket contract to deposit the FIL into the contract
+* The LendingMarket keeps track of the amount each token holder deposits and their gain/loss
+
+**Collateral underwriting (can be custom to lender off-chain)**
+
+* (off-chain) The storage provider submits the desired lease amount and lease period to the lease market.
+* (off-chain) The lease market determines the leasing fees rate based on the on-chain information of the miner, such as slash rate, length of operations, power, …
+* (off-chain) The lease market generates a signed lease specification that can be submitted on-chain, including the lease amount, lease period, and leasing fees rate.
+* (off-chain) The storage provider submits the signed lease specification lease amount, lease period, and leasing fees rate to the LendingMarket contract to create the lease.
+
+**Creating miner actors, owner contracts, and beneficiary contract**
+
+* The LendingMarket contract will create a smart contract (LoanAgent) using `CREATE2`:
+ * LoanAgent contract will serve as the owner and beneficiary of the miner actor
+* The miner will transfer the ownership to the smart contract owner through a separate message submitted externally. In the future, there will be a `Miner` method to change the owner address.
+* The LendingMarket contract will check if the `miner` actor’s owner is the LoanAgent
+* The LendingMarket calls the LoanAgent contract to call the ChangeBeneficiary method on the miner actor to specify the LoanAgent as its beneficiary
+
+**Repayment of lease**
+
+* The miner actor will accumulate block rewards as long as the storage providers keep providing storage to the network
+* The storage providers should be able to call a method on the LendingMarket contract to get the repayment schedule
+ * Including the next payment date and the amount expected to be paid
+* The LoanAgent should pull the required fund from the actor according to the repayment schedule
+* Whenever a repayment is made, the LendingMarket contract should calculate the leasing fees each lender should get
+* After all the repayments are completed, the owner should propose changing the beneficiary to itself, and the beneficiary should approve it
+
+
diff --git a/smart-contracts/smart-contracts.md b/smart-contracts/smart-contracts.md
new file mode 100644
index 000000000..225638f91
--- /dev/null
+++ b/smart-contracts/smart-contracts.md
@@ -0,0 +1,2 @@
+# Smart contracts
+
diff --git a/storage-providers/architecture/README.md b/storage-providers/architecture/README.md
new file mode 100644
index 000000000..a89b73351
--- /dev/null
+++ b/storage-providers/architecture/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers the architectural components and processes that storage
+ providers should be aware of when creating their infrastructure.
+---
+
+# Architecture
+
diff --git a/storage-providers/architecture/lotus-automation.md b/storage-providers/architecture/lotus-automation.md
new file mode 100644
index 000000000..9bbec693d
--- /dev/null
+++ b/storage-providers/architecture/lotus-automation.md
@@ -0,0 +1,36 @@
+---
+description: >-
+ 1-click deployment automation for the storage provider stack allows new
+ storage providers to quickly learn and deploy Lotus and Boost.
+---
+
+# Storage provider automation
+
+{% hint style="info" %}
+[Find the automation code here!](https://github.com/ng-solutions-architecture/lotus-automation)
+{% endhint %}
+
+## Why this automation?
+
+It can be rather overwhelming for new storage providers to learn everything about Filecoin and the various software components. In order to help with the learning process, we provide a fully automated installation of the Lotus and Boost stack. This automation should allow anyone to go on mainnet or the Calibration testnet in no time.
+
+## What are we automating?
+
+This automation is still evolving and will receive more features and capabilities over time. In its current state, it lets you:
+
+* Install and configure Lotus Daemon to interact with the blockchain.
+* Initialize and configure Lotus Miner to join the network as a storage provider.
+* Install and configure Boost to accept storage deals from clients.
+* Install and configure Booster-HTTP to provide HTTP-based retrievals to clients.
+
+## Sealing configuration
+
+The initial use case of this automation is to use sealing-as-a-service instead of doing your own sealing. As such, there is no Lotus Worker configured for the setup. It is possible to extend the setup with a remote worker. However, this Lotus Worker will require dedicated and custom hardware.
+
+## Composable deployment
+
+One of the next features coming to this automation is a composable deployment method. Today Lotus Daemon, Lotus Miner, and Boost are all installed on a single machine. Many production setups, however, will split out those services into their own dedicated hardware. A composable deployment will allow you to deploy singular components on separate servers.
+
+## Prerequisites
+
+Read the `README` carefully on the [GitHub repo](https://github.com/ng-solutions-architecture/lotus-automation) to make sure you have all the required prerequisites in place.
diff --git a/storage-providers/architecture/lotus-components.md b/storage-providers/architecture/lotus-components.md
new file mode 100644
index 000000000..abf44782b
--- /dev/null
+++ b/storage-providers/architecture/lotus-components.md
@@ -0,0 +1,166 @@
+---
+description: >-
+ Lotus is the reference implementation for the Filecoin network. It is
+ maintained by Protocol Labs. Understanding the components of Lotus is
+  necessary for building a well-balanced storage provider.
+---
+
+# Lotus components
+
+The diagram below shows the major components of Lotus:
+
+
+
+The following components are the most important to understand:
+
+* [Lotus daemon](lotus-components.md#lotus-daemon)
+* [Lotus miner](lotus-components.md#lotus-miner)
+* [Lotus worker](lotus-components.md#lotus-worker)
+
+## Lotus daemon
+
+The daemon is a key Lotus component that does the following:
+
+* Syncs the chain
+* Holds the wallets of the storage provider
+
+The machine running the Lotus daemon must be connected to the public internet for the storage provider to function. See the [Lotus documentation](https://lotus.filecoin.io/storage-providers/setup/initialize/#connectivity-to-the-storage-provider) for more in-depth information on connectivity requirements.
+
+### Syncing the chain
+
+Syncing the chain is a key role of the daemon. It communicates with the other nodes on the network by sending messages, which are, in turn, collected into [blocks](../../reference/general/glossary.md#block). These blocks are then collected into [tipsets](../../reference/general/glossary.md#tipset). Your Lotus daemon receives the messages on-chain, enabling you to maintain consensus about the state of the Filecoin network with all the other participants.
+
+Due to the growth in the size of the chain since its genesis, it is not advised for storage providers to sync the entire history of the network. Instead, providers should use the available [lightweight snapshots](https://lotus.filecoin.io/kb/chain-snapshots/) to import the most recent messages. One exception in which a provider would need to sync the entire chain would be to run a [blockchain explorer](../../networks/mainnet/explorers.md).
+
+Synced chain data should be stored on an SSD; however, faster NVMe drives are strongly recommended. A slow chain sync can lead to delays in critical messages being sent on-chain from your Lotus miner, resulting in the faulting of sectors and the slashing of collateral.
+
+Another important consideration is the size of the file system and available free space. Because the Filecoin chain grows as much as 50GB a day, any available space will eventually fill up. It is up to storage providers to manage the size of the chain on disk and prune it as needed. Solutions like [SplitStore](https://lotus.filecoin.io/lotus/configure/splitstore/) (enabled by default) and [compacting](https://lotus.filecoin.io/lotus/manage/chain-management/#compacting-the-chain-data) reduce the storage space used by the chain. Compacting involves replacing the built-up chain data with a recent lightweight snapshot.
+
+### Holding wallets
+
+Another key role of the Lotus daemon is to host the Filecoin wallets that are required to run a storage provider (SP). As an SP, you will need a minimum of 2 wallets: an _owner wallet_ and a _worker wallet_. A third wallet called the _control_ wallet is required to scale your operations in a production environment.
+
+To keep wallets safe, providers should consider physical access, network access, software security, and secure backups. As with any cryptocurrency wallet, access to the private key means access to your funds. Lotus supports [Ledger hardware wallets](https://lotus.filecoin.io/lotus/manage/ledger/), the use of which is recommended. The worker and control wallets cannot be kept on a hardware device because Lotus requires frequent access to those types of wallets. For instance, Lotus may require access to a worker or control wallet to send [WindowPoSt](../../reference/general/glossary.md#window-proof-of-spacetime-windowpost) proofs on-chain.
+
+For information on how to view wallets and their funds, see [Helpful commands](lotus-components.md#view-wallets-and-funds). For information and instructions on integrating your Ledger device with Lotus, see the following video:
+
+{% embed url="https://www.youtube.com/watch?v=u_S5F6WyBAU" %}
+
+#### **Control wallets**
+
+Control wallets are required to scale your operations in a production environment. In production, only using the general worker wallet increases the risk of message congestion, which can result in delayed message delivery on-chain and potential sector faulting, slashing, or lost block rewards. It is recommended that providers create wallets for each subprocess. There are five different types of control wallets a storage provider can create:
+
+* PoSt wallet
+* PreCommit wallet
+* Commit wallet
+* Publish storage deals wallet
+* Terminate wallet
+
+## Lotus miner
+
+The Lotus miner, often referred to using the daemon naming syntax `lotus-miner`, is the process that coordinates most of the storage provider activities. It has 3 main responsibilities:
+
+* Storing sectors and data
+* Scheduling jobs
+* Proving the stored data
+
+### Storing sectors and data
+
+Storage Providers on the Filecoin network store sectors. There are two types of sectors that a provider may store:
+
+* Sealed sectors: these sectors may or may not actually contain data, but they provide capacity to the network, for which the provider is rewarded.
+* Unsealed sectors: used when storing data deals, as retrievals happen from unsealed sectors.
+
+Originally, `lotus-miner` was the component with storage access. This resulted in `lotus-miner` hardware using internal disks, directly attached storage shelves like [JBODs](https://en.wikipedia.org/wiki/Non-RAID\_drive\_architectures#JBOD), Network-Attached-Storage (NAS), or a storage cluster. However, this design introduced a bottleneck on the Lotus miner.
+
+More recently, Lotus has added a more scalable storage access solution in which workers can also be assigned storage access. This removes the bottleneck from the Lotus miner. Low-latency storage access is critical because of the impact on storage-proving processes.
+
+Keeping a backup of your sealed sectors, the cache directory, and any unsealed sectors is crucial. Additionally, you should keep a backup of the `sectorstore.json` file that lives under your storage path. The `sectorstore.json` file is required to restore your system in the event of a failure. You can read more about the `sectorstore.json` file in the [Lotus docs](https://lotus.filecoin.io/).
+
+It is also imperative to have at least a daily backup of your `lotus-miner` state. Backups can be made with:
+
+```shell
+lotus-miner backup
+```
+
+{% hint style="info" %}
+For information on how to back up Lotus miner state, see [Helpful commands](lotus-components.md#backup-lotus-miner-state).
+{% endhint %}
+
+The `sectorstore.json` file, which lives under your storage path, is also required for restoration in the event of a failure. You can read more about the file in the [Lotus docs](https://lotus.filecoin.io/storage-providers/seal-workers/seal-workers/#sector-storage-groups).
+
+### Scheduling jobs
+
+Another key responsibility of the Lotus Miner is the scheduling of jobs for the [sealing pipeline](sealing-pipeline.md) and storage proving.
+
+For information on how to view scheduled jobs, see [View scheduled jobs](lotus-components.md#view-scheduled-jobs). For information on how to see the workers on which the miner can schedule jobs, see [View available workers](lotus-components.md#view-available-workers).
+
+### Storage proving
+
+One of the most important roles of `lotus-miner` is the [Storage proving](lotus-components.md#storage-proving). Both [WindowPoSt](../../reference/general/glossary.md#window-proof-of-spacetime-windowpost) and [WinningPoSt](../../reference/general/glossary.md#winning-proof-of-spacetime-winningpost) processes are usually handled by the `lotus-miner` process. For scalability and reliability purposes it is now also possible to run these proving processes on dedicated servers (proving workers) instead of using the Lotus miner.
+
+The proving processes require low-latency access to sealed sectors. The proving challenge requires a GPU to run on. The resulting `zkProof` will be sent to the chain in a message. Messages must arrive within 30 minutes for WindowPoSt, and 30 seconds for WinningPoSt. It is extremely important that providers properly size and configure the proving workers, whether they are using just the Lotus miner or separate workers. Additionally, dedicated wallets, described in [Control wallets](lotus-components.md#control-wallets), should be set up for these processes.
+
+Always check if there are upcoming proving deadlines before halting any services for maintenance. For detailed instructions, refer to the [Lotus maintenance guide](https://lotus.filecoin.io/storage-providers/operate/maintenance/).
+
+For information on how to check if there are upcoming proving deadlines, see [Helpful commands](lotus-components.md#helpful-commands).
+
+## Lotus worker
+
+The Lotus worker is another important component of the Lotus architecture. There can be - and most likely will be - multiple workers in a single storage provider setup. Assigning specific roles to each worker enables higher throughput, [sealing rate](sealing-rate.md), and improved redundancy.
+
+As mentioned above, proving tasks can be assigned to dedicated workers, and workers can also get storage access. The remaining worker tasks encompass running a [sealing pipeline](sealing-pipeline.md), which is discussed in the next section.
+
+## Helpful commands
+
+The following commands can help storage providers with their setup.
+
+### Backup Lotus miner state
+
+It is imperative to have at least one daily backup of your Lotus miner state. Backups can be made using the following command:
+
+```shell
+lotus-miner backup
+```
+
+### View wallets and funds
+
+You can use the following command to view wallets and their funds:
+
+```shell
+lotus wallet list
+```
+
+### Check storage configuration
+
+Run the following command to check the storage configuration for your Lotus miner instance:
+
+```shell
+lotus-miner storage list
+```
+
+This command returns information on your _sealed space_ and your _scratch space_, otherwise known as a cache. These spaces are only available if you have properly configured your Lotus miner by following the steps described in the [Lotus documentation](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/).
+
+### View scheduled jobs
+
+To view the scheduled sealing jobs, run the following:
+
+```shell
+lotus-miner sealing jobs
+```
+
+### View available workers
+
+To see the workers on which the miner can schedule jobs, run:
+
+```shell
+lotus-miner sealing workers
+```
+
+### View proving deadlines
+
+To check if there are upcoming proving deadlines, run the following:
+
+```shell
+lotus-miner proving deadlines
+```
diff --git a/storage-providers/architecture/network-indexer.md b/storage-providers/architecture/network-indexer.md
new file mode 100644
index 000000000..aca4e8573
--- /dev/null
+++ b/storage-providers/architecture/network-indexer.md
@@ -0,0 +1,27 @@
+---
+description: >-
+ InterPlanetary Network Indexer (IPNI) enables users to search for
+ content-addressable data available from storage providers. This page discusses
+ the implications of IPNI for storage providers.
+---
+
+# Network indexer
+
+A _network indexer_, also referred to as an _indexer node_ or _indexer_, is a node that maps content identifiers (CIDs) to records of who has the data and how to retrieve that data. These records are called _provider data records_. Indexers are built to scale in environments with massive amounts of data, like the Filecoin network, and are also used by the IPFS network to locate data. Because the Filecoin network stores so much data, clients can’t perform efficient retrieval without proper indexing. Indexer nodes work like a specialized key-value store for efficient retrieval of content-addressed data.
+
+There are two groups of users within the network indexer process:
+
+* _Storage providers_ advertise their available content by storing data in the indexer. This process is handled by the indexer’s _ingest_ logic.
+* _Retrieval clients_ query the indexer to determine which storage providers have the content and what protocol to use, such as Graphsync, Bitswap, etc. This process is handled by the indexer’s _find_ logic.
+
+## How the indexer works
+
+This diagram summarizes the different _actors_ in the indexer ecosystem and how they interact with each other. In this context, these actors are not the same as [smart-contract actors](../../smart-contracts/filecoin-evm-runtime/actor-types.md).
+
+For more info on how the indexer works, read the Filecoin blog post.
+
+## IPNI and storage providers
+
+Storage providers publish data to indexers so that clients can find that data using the CID or multihash of the content. When a client queries the indexer using a CID or multihash, the indexer then responds to the client with the provider data record, which tells the client where and how the content can be retrieved.
+
+As a storage provider, you will need to run an indexer in your setup so that your clients know where and how to retrieve data. For more information on how to create an index provider, see the [IPNI documentation](https://github.com/ipni/storetheindex/blob/main/doc/creating-an-index-provider.md).
diff --git a/storage-providers/architecture/sealing-as-a-service.md b/storage-providers/architecture/sealing-as-a-service.md
new file mode 100644
index 000000000..5022c8d2f
--- /dev/null
+++ b/storage-providers/architecture/sealing-as-a-service.md
@@ -0,0 +1,31 @@
+---
+description: >-
+ This page describes how sealing-as-a-service works, and the benefits to
+ storage providers.
+---
+
+# Sealing-as-a-service
+
+Storage providers with hardware cost or availability constraints can use _Sealing-as-a-service_, in which another provider performs sector sealing on the storage provider's behalf. This page describes how sealing-as-a-service works, and the benefits to storage providers.
+
+### Overview
+
+In a traditional setup, a storage provider needs high-end hardware to build out a [sealing pipeline](sealing-pipeline.md). Storage providers with hardware cost or availability constraints can use _Sealing-as-a-Service_ providers, where another provider performs sector sealing on the storage provider’s behalf. In this model, the following occurs:
+
+1. The storage provider provides the data to the sealer
+2. The sealer seals the data into sectors.
+3. The sealer returns the sealed sectors in exchange for a service cost.
+
+### Benefits
+
+Sealing-as-a-service provides multiple benefits for storage providers:
+
+* Available storage can be filled faster, thereby maximizing block rewards, without investing in a complex, expensive sealing pipeline.
+* Bigger deals can be onboarded, as Sealing-as-a-Service essentially offers a burst capability in your sealing capacity. Thus, storage providers can take on larger deals without worrying about sealing time and not meeting client expectations.
+* Storage capacity on the Filecoin network can be expanded without investing in a larger sealing pipeline.
+
+Other solutions are possible where the sealing partner seals committed capacity (CC) sectors for you, which you in turn [snap up](../filecoin-deals/snap-deals.md) to data sectors.
+
+See the following video from [Aligned](https://aligned.co/sealing-as-a-service) about their offering of Sealing-as-a-Service:
+
+{% embed url="https://www.youtube.com/watch?v=v4l1lGsUXvs" %}
diff --git a/storage-providers/architecture/sealing-pipeline.md b/storage-providers/architecture/sealing-pipeline.md
new file mode 100644
index 000000000..8e28d43cc
--- /dev/null
+++ b/storage-providers/architecture/sealing-pipeline.md
@@ -0,0 +1,83 @@
+---
+description: >-
+ The process of sealing sectors is called the sealing pipeline. It is important
+ for storage providers to understand the steps of the process.
+---
+
+# Sealing pipeline
+
+Each step in the sealing process has different performance considerations, and fine-tuning is required to align the different steps optimally. For example, storage providers that don’t understand the process’s expected throughput may end up overloading the sealing pipeline by trying to seal too many sectors at once or taking on a dataset that is too large for available infrastructure. This can lead to a slower _sealing rate_, which is discussed in greater detail in [Sealing Rate](sealing-rate.md).
+
+## Overview
+
+The sealing pipeline can be broken into the following steps:
+
+
+
+### AddPiece
+
+The sealing pipeline begins with _AddPiece_ (AP), where the pipeline takes a _Piece_ and prepares it into the sealing scratch space for the _PreCommit 1_ task (PC1) to take over. In Filecoin, a _Piece_ is data in CAR-file format produced by an [IPLD DAG](https://ipld.io) with a corresponding `PayloadCID` and `PieceCID`. The maximum Piece size is equal to the sector size, which is either 32 GiB or 64 GiB. If the content is larger than the sector size, it must be split into more than one `PieceCID` during data preparation.
+
+The AddPiece process is not a very intensive process and only uses some CPU cores; it doesn’t require the use of a GPU. It is typically co-located on a server with other worker processes from the sealing pipeline. As PC1 is the next process in the sealing pipeline, running AddPiece on the same server as the PC1 process is a logical architecture configuration.
+
+Consider limiting the AP process to a few cores by using the [`taskset` command](https://man7.org/linux/man-pages/man1/taskset.1.html), where `<cores>` is the range of cores on which the process needs to run:
+
+```shell
+taskset -c <cores> lotus-worker run ...
+```
+
+### PreCommit 1
+
+PreCommit 1 (PC1) is the most intensive process of the entire sealing pipeline. PC1 is the step in which a sector, regardless of whether it contains data or not, is cryptographically secured. The worker process loads cryptographic parameters from a cache location, which should be stored on enterprise NVMe for latency reduction. These parameters are then used to run Proof-of-Replication (PoRep) SDR encoding against the sector that was put into the sealing scratch space. This task is single-threaded and very CPU intensive, so it requires a CPU with SHA256 extensions. Typical CPUs that meet this requirement include the AMD Epyc Milan/Rome or an Intel Xeon Ice Lake with 32 cores or more.
+
+Using the scratch space, the PC1 task will create 11 layers of the sector. Storage providers must host scratch space for this on enterprise NVMe. This means that:
+
+* Every sector consumes memory equal to 1+11 times its size on the scratch volume.
+* For a 32 GiB sector, PC1 requires 384 GiB on the scratch volume
+* For a 64 GiB sector, PC1 requires 768 GiB.
+
+In order to seal at a decent rate and to make use of all the sealing capacity in a PC1 server, you will run multiple PC1 workers in parallel on a system. You can learn more about this in the chapter on [Sealing Rate](sealing-rate.md). Sealing several sectors multiplies the requirements on CPU cores, RAM, and scratch space by the number of sectors being sealed in parallel.
+
+In order to achieve a decent sealing rate and make use of all sealing capacity in a PC1 server, storage providers should run multiple PC1 processes in parallel on a system. More information on this can be found in [Sealing Rate](sealing-rate.md).
+
+The process of sealing a single 32 GiB sector takes roughly **3 hours**.
+
+### PreCommit 2
+
+When PC1 has been completed on a given sector, the entire scratch space for that sector is moved over to the _PreCommit 2 (PC2)_ task. This task is typically executed on a different server than the PC1 server because it behaves differently. In short, PC2 validates PC1 using the Poseidon hashing algorithm over the Merkle Tree DAG that was created in PC1. As mentioned in the previous section, the entire scratch space is either 384 GiB or 768 GiB, depending on the sector size.
+
+Where PC1 is CPU-intensive, PC2 is executed on GPU. This task is also notably shorter in duration than PC1, typically 10 to 20 minutes on a capable GPU. This requires a GPU with at least 10 GiB of memory and 3500+ CUDA cores or shading units, in the case of Nvidia. Storage providers can use slower GPUs, but this may create a bottleneck in the sealing pipeline.
+
+For best performance, compile Lotus with CUDA support instead of OpenCL. For further information, see the Lotus [CUDA Setup](https://lotus.filecoin.io/tutorials/lotus-miner/cuda/).
+
+In the case of a [Snap Deal](../filecoin-deals/snap-deals.md), an existing committed capacity sector is filled with data. When this happens, the entire PC1 task does not run again; however, the snapping process employs PC1’s `replica-update` and `prove-replica-update` to add the data to the sector. This can run on the PC2 worker or on a separate worker depending on your sealing pipeline capacity.
+
+When PC2 is complete for a sector, a _precommit_ message is posted on-chain. If batching is configured, Lotus will batch these messages to avoid sending messages to the chain for every single sector. In addition, there is a configurable timeout interval, after which the message will be sent on-chain. This timeout is set to 24 hours by default. These configuration parameters are found in the `.lotusminer/config.toml` file.
+
+If you want to force the pre-commit message on-chain for testing purposes, run:
+
+```shell
+lotus-miner sectors batching precommit --publish-now
+```
+
+The sealed sector and its 11 layers are kept on the scratch volume until Commit 2 (C2) is complete.
+
+### WaitSeed
+
+WaitSeed is not an actual task that is executed, but it is a step in the pipeline in which the blockchain forces the pipeline to wait for 150 epochs as a built-in security mechanism. With Filecoin’s 30 second epochs, this means 75 minutes must elapse between PC2 and the next task, Commit 1 (C1).
+
+### Commit 1
+
+The Commit 1 (C1) phase is an intermediate phase that performs the preparation necessary to generate a proof. It is CPU-bound and typically completes in seconds. It is recommended that storage providers run this process on the server where PC2 is running.
+
+### Commit 2
+
+The final step in the sealing pipeline is Commit 2 (C2). This step involves the creation of a zk-SNARK proof. Like PC2, this task is GPU-bound and is, therefore, best co-located with the PC2 task.
+
+Finally, the proof is committed on-chain in a message. As with the pre-commit messages, the commit messages are batched and held for 24 hours by default before committing on-chain to avoid sending messages for each and every sector. You can again avoid batching by running:
+
+```shell
+lotus-miner sectors batching commit --publish-now
+```
+
+Finally, the sealed sector is stored in the miner’s long-term storage space, along with unsealed sectors, which are required for retrievals if configured to do so.
diff --git a/storage-providers/architecture/sealing-rate.md b/storage-providers/architecture/sealing-rate.md
new file mode 100644
index 000000000..2c6018355
--- /dev/null
+++ b/storage-providers/architecture/sealing-rate.md
@@ -0,0 +1,44 @@
+---
+description: >-
+ The rate at which storage providers complete the sealing pipeline process is
+  called the sealing rate, or sealing capacity. This page describes
+  considerations and advice in regard to sealing rate.
+---
+
+# Sealing rate
+
+## Cost
+
+When setting up their business, storage providers must determine how fast they should seal and, thus, how much sealing hardware they should buy. In other words, the cost is an important factor in determining a storage provider’s sealing rate. For example, suppose you have an initial storage capacity of 100 TiB, which would account for 1 PiB [QAP](../../reference/general/glossary.md#quality-adjusted-storage-power) if all the sectors contain Filecoin Plus verified deals. If your sealing capacity is 2.5 TiB per day, you will seal your full 100 TiB in 40 days. Is it worth investing in double the sealing capacity to fill your storage in just 20 days? It might be if you are planning to grow way beyond 100 TiB. This is an example of the sort of cost considerations storage providers must factor in when tuning the sealing rate.
+
+## Customer expectations
+
+A common reason that a storage provider may want or need a faster sealing rate is customer expectations. When you take on a customer deal, there are often requirements to seal a dataset of a certain size within a certain time window. If you are a new storage provider with 2.5 TiB per day in sealing capacity, you cannot take on a deal of 2 PiB that needs to be on-chain in 1 month; at the very least, you could not take the deal using your own sealing infrastructure. Instead, you can use a [Sealing-as-a-service provider](sealing-as-a-service.md), which can help you scale your sealing capabilities.
+
+## Balancing the sealing pipeline
+
+When designing their sealing pipeline, storage providers should consider bottlenecks, the grouping of similar tasks, and scaling out.
+
+### Bottlenecks
+
+The art of building a well-balanced sealing pipeline means having the bottlenecks where you expect them to be; any non-trivial piece of infrastructure always contains some kind of bottleneck. Ideally, you should design your systems so that the PC1 process is the bottleneck. By doing this, all other components are matched to the capacity required to perform PC1. With PC1 being the most resource-intensive task in the pipeline, it makes the most sense to architect a solution around this bottleneck. Knowing exactly how much sealing capacity you can get from your PC1 servers is vital so you can match the rest of your infrastructure to this throughput.
+
+Assuming you obtain maximum hardware utilization from your PC1 server to seal 15 sectors in parallel, that would mean a sealing rate of 3.75 TiB per day. The calculation is described below:
+
+```plaintext
+15 sectors x 32 GiB / 3 hours PC1 runtime x 24 hours / 1024 = 3.75 TiB /day
+```
+
+### Grouping similar tasks
+
+While a Lotus worker can run all of the various tasks in the sealing pipeline, different storage provider configurations may split tasks between workers. Because some tasks are similar in behavior and others are insignificant in terms of resource consumption, it makes sense to group like-tasks together on the same worker.
+
+A common grouping is _AddPiece (AP)_ and PreCommit1 (PC1) because AP essentially prepares the data for the PC1 task. If you have dedicated hardware for PreCommit2 (PC2), your scratch content will move to that other server. If you are grouping PC1 and PC2 on the same server, you won’t have the sealing scratch copied, but you will need a larger NVMe volume. Eventually, you may run out of sealing scratch space and not be able to start sealing additional sectors.
+
+As PC1 is CPU-bound and PC2 is GPU-bound, this is another good reason to separate those tasks into dedicated hardware, especially if you are planning to scale. Because PC2 is GPU-bound, it makes sense to have PC2, C1, and C2 colocated on the same worker.
+
+Another rule of thumb is to have two PC2 workers for every PC1 worker in your setup. The _WaitSeed_ phase occurs after PC2, which locks the scratch space for a sector until C1 and C2. In order to keep sealing sectors in PC1, PC2 must have sufficient capacity.
+
+### Scaling out
+
+A storage provider’s sealing capacity scales linearly with the hardware you add to it. For example, if your current setup allows for a sealing rate of 3 TiB per day, doubling the number of workers could bring you to 6 TiB per day. This requires that all components of your infrastructure are able to handle this additional throughput. Using [Sealing-as-a-Service providers](sealing-as-a-service.md) allows you to scale your sealing capacity without adding more hardware.
diff --git a/storage-providers/basics/README.md b/storage-providers/basics/README.md
new file mode 100644
index 000000000..6fdfd9b0d
--- /dev/null
+++ b/storage-providers/basics/README.md
@@ -0,0 +1,53 @@
+---
+description: >-
+ This page will help you understand how to plan a profitable business, design a
+ suitable storage provider architecture, and make the right hardware
+ investments.
+---
+
+# Basics
+
+The Filecoin network provides decentralized data storage and makes sure data is verified, always available, and immutable. Storage providers in the Filecoin network are in charge of storing and providing content, and issuing new blocks.
+
+To become a storage provider in the Filecoin network you need a range of technical, financial and business skills. We will explain all the key concepts you need to understand in order to design a suitable architecture, make the right hardware investments, and run a profitable storage provider business.
+
+Follow these steps to begin your storage provider journey:
+
+1. Understand Filecoin economics
+2. Plan your business
+3. Make sure you have the right skills
+4. Build the right infrastructure
+
+## Understand Filecoin economics
+
+To understand how you can run a profitable business as a Filecoin storage provider, it is important to make sure you understand the economics of Filecoin. Once you understand all core concepts, you can build out a strategy for your desired ROI.
+
+Storage providers can also add additional value to clients when they offer certain certifications. These can enable a storage provider to charge customers additional fees for storing data in compliance with those standards, for example, HIPAA, SOC2, PCI, GDPR and others.
+
+[Filecoin economics ->](../filecoin-economics/storage-proving.md)
+
+## Plan your business
+
+The hardware and other requirements for running a Filecoin storage provider business are significantly higher than regular blockchain mining operations. The mechanisms are designed this way because, in contrast to some other blockchain solutions, where you can simply configure one or more nodes to “mine” tokens, the Filecoin network’s primary goal is to provide decentralized storage for humanity’s most valuable data.
+
+You need to understand the various earning mechanisms in the Filecoin network.
+
+[Filecoin deals ->](../filecoin-deals/storage-deals.md)
+
+## Make sure you have the right skills
+
+As will become clear, running a storage operation is a serious business, with client data and pledged funds at stake. You will be required to run a highly-available service, and there are automatic financial penalties if you cannot demonstrate data availability to the network. There are many things that can go wrong in a data center, on your network, on your OS, or at an application level.
+
+You will need skilled people to operate your storage provider business. Depending on the size and complexity of your setup this can be 1 person with skills across many different domains, or multiple dedicated people or teams.
+
+[People and skills ->](../skills/linux.md)
+
+## Build the right infrastructure
+
+At the lowest level, you will need datacenter infrastructure. You need people capable of architecting, racking, wiring, and operating infrastructure components. Alternatively, you can get it colocated, or even entirely as a service from a datacenter provider.
+
+Take availability and suitable redundancy into consideration when choosing your datacenter or colocation provider. Any unavailability of your servers, network or storage can result in automatic financial penalties on the Filecoin network.
+
+[Software architecture ->](../architecture/lotus-components.md)
+
+[Infrastructure ->](../skills/storage.md)
diff --git a/storage-providers/basics/quickstart-guide.md b/storage-providers/basics/quickstart-guide.md
new file mode 100644
index 000000000..7c0b90e6c
--- /dev/null
+++ b/storage-providers/basics/quickstart-guide.md
@@ -0,0 +1,39 @@
+---
+description: >-
+ This page is a quick start guide for storage providers in the Filecoin
+ ecosystem.
+---
+
+# Quickstart guide
+
+## Explore the storage provider documentation
+
+Get ready to dive into the valuable resources of the [storage provider documentation](broken-reference/). This comprehensive guide offers a wealth of information about the role of storage providers in the Filecoin ecosystem, including insights into the economic aspects. You’ll also gain knowledge about the software architecture, hardware infrastructure, and the necessary skills for success.
+
+## Gain insights into ROI and collateral’s role
+
+To run a successful storage provider business, it’s crucial to understand the concept of [Return on Investment (ROI)](https://calc.filecoin.eu) and the significance of collateral. By planning ahead and considering various factors, such as CAPEX, OPEX, network variables, and collateral requirements, you can make informed decisions that impact your business’s profitability and desired capacity.
+
+## Get to know the ecosystem
+
+One of the truly enriching elements of the Filecoin ecosystem lies in its vibrant community. Meet the community on the [Filecoin Slack](https://filecoin.io/slack). Within this dynamic network, you’ll find a treasure trove of individuals who are eager to share their experiences and offer invaluable solutions to the challenges they’ve encountered along the way. Whether it’s navigating the intricacies of storage provider operations or overcoming hurdles on the blockchain, this supportive community stands ready to lend a helping hand. Embrace the spirit of collaboration and tap into this remarkable network.
+
+## Unleash the Power of Filecoin’s Reference Implementation
+
+Get ready to dive into the heart of the Filecoin network with [Lotus](https://lotus.filecoin.io), the leading reference implementation. As the most widely used software stack for interacting with the blockchain and operating a storage provider setup, Lotus holds the key to unlocking a world of possibilities. Seamlessly navigate the intricacies of this powerful tool and leverage its capabilities to propel your journey forward.
+
+## Hands-on learning and exploration
+
+It’s time to roll up your sleeves and embark on a hands-on adventure. With a multitude of options at your disposal, setting up a [local devnet](../../networks/local-testnet/) environment is the easiest and most exciting way to kickstart your Filecoin journey. Immerse yourself in the captivating world of sealing sectors and witness firsthand how this critical process works. Feel the thrill of experimentation as you delve deeper into the inner workings of this remarkable technology.
+
+## Transforming into a storage provider
+
+Congratulations on taking the next leap in becoming a full-fledged storage provider! Now is the time to determine your starting capacity and architect a tailored solution to accommodate it. Equip yourself with the [necessary hardware](../infrastructure/reference-architectures.md) to kickstart your journey on the mainnet. Test your setup on the calibration testnet to fine-tune your skills and ensure seamless operations. Once you’re ready, brace yourself for the excitement of joining the mainnet.
+
+## Supercharge your mainnet experience
+
+As you step into the vibrant realm of the mainnet, it’s time to supercharge your storage provider capabilities with [Boost](https://boost.filecoin.io). Discover the immense potential of this powerful software designed to help you secure storage deals and offer efficient data retrieval services to data owners. Unleash the full force of Boost and witness the transformative impact it has on your Filecoin journey.
+
+## Discover the world of verified deals and tools
+
+Within the Filecoin network, there are many [programs and tools](../filecoin-deals/filecoin-programs.md) designed to enhance your storage provider setup. Explore Estuary, Slingshot, Spade, and more, each offering unique opportunities to secure verified deals. Uncover the power of these tools as you dive into the documentation, gaining valuable insights and expanding your knowledge. Make the best use of data programs on your path to success.
diff --git a/storage-providers/filecoin-deals/README.md b/storage-providers/filecoin-deals/README.md
new file mode 100644
index 000000000..58fd4c6e6
--- /dev/null
+++ b/storage-providers/filecoin-deals/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers the different types of deals in the Filecoin network, and
+ how they relate to storage providers.
+---
+
+# Filecoin deals
+
diff --git a/storage-providers/filecoin-deals/auxiliary-services.md b/storage-providers/filecoin-deals/auxiliary-services.md
new file mode 100644
index 000000000..98d6938be
--- /dev/null
+++ b/storage-providers/filecoin-deals/auxiliary-services.md
@@ -0,0 +1,24 @@
+---
+description: >-
+ As a storage provider, you can set your business apart from the rest by
+ offering additional services to your customers. Many new use-cases for the
+  Filecoin network are emerging as new technologies are developed.
+---
+
+# Auxiliary services
+
+## Saturn
+
+One of the additional services is participation in Saturn retrieval markets. [Saturn](https://saturn.tech) is a Web3 CDN (“content delivery network”), and will [launch in stages in 2023](https://saturn.tech/#roadmap). Saturn aims to be the biggest Web3 CDN, and biggest CDN overall. With the introduction of Saturn, data stored on Filecoin is no longer limited to archive or cold storage, but can also be cached into a CDN layer for fast retrieval. Data that needs to be available quickly can then be stored on Filecoin and retrieved through Saturn. Saturn comes with 2 layers of caching, L1 and L2. L1 nodes typically run in data centers, require high availability and 10 Gbps minimum connectivity. The L1 Saturn provider earns FIL through caching and serving data to clients. L2 nodes can be run via an app on desktop hardware.
+
+## FVM
+
+Other new opportunities are emerging since the launch of FVM (Filecoin Virtual Machine) in March 2023. The FVM allows smart contracts to be executed on the Filecoin blockchain. The FVM is Ethereum-compatible (also called the FEVM) and allows for entire new use cases to be developed in the Filecoin ecosystem. Think of on-chain FIL lending as an example, but the opportunities are countless.
+
+## Bacalhau
+
+A next step after the introduction of FVM is [Bacalhau](https://docs.bacalhau.org/), which will be offering Compute over Data (COD). After the introduction of a compute layer on Filecoin, Bacalhau’s COD promises to run compute jobs over the data where the data resides, at the storage provider. Today, data scientists have to transfer their datasets to compute farms in order for their AI, ML or other data processing activities to run. Bacalhau will allow them to run compute activities on the data where the data is located, thereby removing the expensive requirement to move data around. Storage providers will be able to offer - and get rewarded for - providing compute power to data scientists and other parties who want to execute COD.
+
+## Storage tiering
+
+Another potential service to offer is storage tiers with various performance profiles. For example, storage providers can offer hot/online storage by keeping an additional copy of the unsealed data available for immediate retrieval, as well as the sealed copy that has been stored on the Filecoin Network.
diff --git a/storage-providers/filecoin-deals/charging-for-data.md b/storage-providers/filecoin-deals/charging-for-data.md
new file mode 100644
index 000000000..36864cfb9
--- /dev/null
+++ b/storage-providers/filecoin-deals/charging-for-data.md
@@ -0,0 +1,19 @@
+---
+description: >-
+ This page covers how storage providers can charge for data on the Filecoin
+ network.
+---
+
+# Charging for data
+
+Charging for data stored on your storage provider network is an essential aspect of running a sustainable business. While block rewards from the network can provide a source of income, they are highly dependent on the volatility of the price of FIL, and cannot be relied on as the sole revenue stream.
+
+To build a successful business, it is crucial to develop a pricing strategy that is competitive, yet profitable. This will help you attract and retain customers, as well as ensure that your business succeeds in the long term. While some programs may require storage providers to accept deals for free, or bid in auctions to get a deal, it is generally advisable to charge customers for most client deals.
+
+When developing your pricing strategy, it is important to consider the cost of sales associated with acquiring new customers. This cost consideration should include expenses related to business development, marketing, and sales, which you should incorporate into your business’ return-on-investment (ROI) calculation.
+
+In addition to sales costs, other factors contribute to your business’ total cost of ownership. These include expenses related to backups of your setup and data, providing an access layer to ingest data and for retrievals, preparing the data when necessary, and more. Investigating these costs is essential to ensure your pricing is competitive, yet profitable.
+
+By charging for data stored on your network, you can create a sustainable business model that allows you to invest in hardware and FIL as collateral, as well as grow your business over time. This requires skilled people capable of running a business at scale and interacting with investors, venture capitalists, and banks to secure the necessary funding for growth.
+
+Next to the sales cost, there are other things that contribute to the total cost of ownership of your storage provider business. Think of backups of your setup and the data, providing an access layer to ingest data and for retrievals, preparing the data (if not done already), and more.
diff --git a/storage-providers/filecoin-deals/filecoin-programs.md b/storage-providers/filecoin-deals/filecoin-programs.md
new file mode 100644
index 000000000..4d978daa1
--- /dev/null
+++ b/storage-providers/filecoin-deals/filecoin-programs.md
@@ -0,0 +1,65 @@
+---
+description: >-
+ This page covers the various programs and services that storage providers can
+ take part in.
+---
+
+# Filecoin programs
+
+Although it is possible to find your own data storage customers with valuable datasets they want to store, and have them verified through KYC ([Know Your Customer](https://en.wikipedia.org/wiki/Know\_your\_customer)) to create verified deals for [Fil+](../../basics/how-storage-works/filecoin-plus.md), there are also programs and platforms that make it easier for storage providers to receive verified deals.
+
+## Estuary
+
+[Estuary](https://estuary.tech/) is a service on the internet (but you can also host your own estuary node) that enables clients to upload data to the Filecoin Network via a web interface (and API). Estuary supports a maximum file size of 32 GB, has no limit on total upload size, and while in the alpha phase, it’s free! As a client you can upload data which will later be aggregated into a deal that gets sealed and proven on-chain. Estuary does the data collection and data preparation part of the workflow, and then hands off the sealing and long-term storing of the deals to storage providers. As a storage provider, you can choose to accept deals from Estuary. In order to receive deals from Estuary, a storage provider must sign up on the Estuary website. The storage provider is required to store sealed and unsealed copies of the data, and to enable retrievals from the unsealed copies for free.
+
+## Web3.storage
+
+[Web3.storage](https://web3.storage/) runs on “Elastic IPFS” as the inbound storage protocol offering scalability, performance and reliability as the platform grows. It guarantees the user (typically developers) that the platform will always serve your data when you need it. In the backend the data is uploaded onto the Filecoin Network for long-term storage.
+
+## Filecoin Green
+
+[Filecoin Green](https://green.filecoin.io) aims to measure the environmental impacts of Filecoin and verifiably drive them below zero, building infrastructure along the way that allows anyone to make transparent and substantive environmental claims. The team maintains the [Filecoin Energy Dashboard](https://filecoin.energy/) and works with storage providers to decarbonize their operations through the [Energy Validation Process](https://filecoin-green.gitbook.io/filecoin-green-documentation/storage-providers-green-guidance-documentation/storage-providers-tiered-sustainability-claims). Connect with the team on Slack at **#fil-green**, or via email at [green@filecoin.org](mailto:green@filecoin.org)
+
+## CO2.Storage
+
+[CO2.Storage](https://co2.storage) is a decentralized storage solution for structured data based on content addressed data schemas. CO2.Storage primarily focuses on structured data for environmental assets, such as Renewable Energy Credits, Carbon Offsets, and geospatial datasets and maps inputs to base data schemas (IPLD DAGs) for off-chain data (like metadata, images, attestation documents, and other assets) to promote the development of standard data schemas for environmental assets. This project is in _alpha_, and while many features can be considered stable, we are waiting until we are feature complete to fully launch. The Filecoin Green team is actively working on this project and welcomes contributions from the community.
+
+## Slingshot
+
+[Slingshot](https://slingshot.filecoin.io) is a program that unites Data clients, Data preparers and storage providers in a community to onboard data and share replicas of publicly valuable [_Open Datasets_](https://datasets.filecoin.io). Rather than providing a web interface like Estuary, Slingshot is a program that provides a workflow and tools for onboarding of large open datasets. The Slingshot Deal Engine provides deals to registered and certified storage providers. The data is prepared and uploaded using a tool called Singularity. The program has [clear requirements of a storage provider](https://slingshot.filecoin.io/requirements#participating-as-a-storage-provider-sp) regarding their capabilities.
+
+See the below video for more information on Slingshot:
+
+{% embed url="https://www.youtube.com/watch?v=14sasiEThig" %}
+An overview of the Slingshot project from the Enterprise Storage Provider Accelerator (ESPA).
+{% endembed %}
+
+## Evergreen
+
+[Evergreen](https://evergreen.filecoin.io/) extends the Slingshot program by aiming to store the open datasets forever. Standard deals have a maximum duration of 540 days, which is not long enough for valuable, open datasets that might need to be stored forever. Evergreen uses a deal engine ([Spade](https://github.com/ribasushi/spade)) that automatically renews deals to extend the lifetime of the dataset on-chain.
+
+## Moon Landing
+
+[Moon Landing](https://moon-landing.io) is a program related to Slingshot and Evergreen. Moon Landing aims to provide assistance for new storage providers to enter the Slingshot program. They provide technical assistance as well as establishing contacts with other storage providers. Participants in the Moon Landing program are matched with other storage providers in a capsule. The intent is to grow communities of storage providers that can learn from each other and can share data replicas with each other.
+
+## Saturn
+
+A whole new access capability is being launched with [Saturn](https://saturn.tech), which is the world’s first Web3 Content Delivery Network (CDN). Saturn, as a fully distributed CDN, allows clients to access their data via Saturn nodes close to them. Content retrieval times of less than 1 second will open up various new use-cases for Filecoin and create a new market for retrieval providers (L1 Saturn nodes) in which storage providers can also participate.
+
+## Partner tools and programs
+
+Many other programs and tools exist in the Filecoin community, developed by partners or storage providers. We list some examples below.
+
+### CIDGravity
+
+[CIDGravity](https://www.cidgravity.com/) is a software-as-a-service that allows storage providers to handle dynamic pricing and client management towards your solution. It integrates with deal engines such as [Boost](https://boost.filecoin.io).
+
+### Big Data Exchange
+
+Another program that allows storage providers easy access to Fil+ deals is [Big Data Exchange](https://www.bigdataexchange.io/). This platform allows storage providers to bid on datasets they are interested in storing. As a storage provider, you take part in an online auction where you offer FIL to store large data sets. The dataset is explained, together with the technical requirements for the storage provider. The volumes of the datasets offered here are - as the name suggests - big, which can yield big returns in block rewards. That is why, as a storage provider, you might want to bid to store a copy.
+
+Storing real client data means there will be expectations on retrievability of that data. The storage provider can provide the data to the client by keeping unsealed copies of the data and creating an access layer (web or other) for the client. Boost-http (see [https://lotus.filecoin.io](https://lotus.filecoin.io)) also provides a way to access data.
+
+### Filswan
+
+[Filswan](https://www.filswan.com/homepage) is an auctioning platform that matches clients with storage providers.
diff --git a/storage-providers/filecoin-deals/return-on-investment.md b/storage-providers/filecoin-deals/return-on-investment.md
new file mode 100644
index 000000000..c60bfaaa8
--- /dev/null
+++ b/storage-providers/filecoin-deals/return-on-investment.md
@@ -0,0 +1,51 @@
+---
+description: >-
+ This page covers the potential return-on-investment (ROI) for storage
+ providers (SPs) and how each SP can calculate their ROI.
+---
+
+# Return-on-investment
+
+Calculating the Return-on-Investment (ROI) of your storage provider business is essential to determine the profitability and sustainability of your operations. The ROI indicates the return or profit on your investment relative to the cost of that investment. There are several factors to consider when calculating the ROI of a storage provider business.
+
+**First**, the cost of the initial hardware investment and the collateral in FIL required to participate in the network must be considered. These costs are significant and will likely require financing from investors, venture capitalists, or banks.
+
+**Second**, the income generated from the block rewards must be factored into the ROI calculation. However, this income is subject to the volatility of the FIL token price, which can be highly unpredictable.
+
+**Third**, it is important to consider the cost of sales when calculating the ROI. Sales costs include the cost of acquiring new customers, marketing, and any fees associated with payment processing. These costs can vary depending on the sales strategy and the size of the business.
+
+**Fourth**, the total cost of ownership must be considered. This includes the cost of backups, providing access to ingest and retrieve data, preparing the data, and any other costs associated with operating a storage provider business.
+
+**Finally**, the forecasted growth of the network and the demand for storage will also impact the ROI calculation. If the network and demand for storage grow rapidly, the ROI may increase. However, if the growth is slower than anticipated, the ROI may decrease.
+
+**Overall**, calculating the ROI of a storage provider business is complex and requires a thorough understanding of the costs and income streams involved. The storage provider Forecast Calculator can assist in determining the ROI by accounting for various factors such as hardware costs, token price, and expected growth of the network.
+
+Calculating the ROI of your storage provider business is important. Check out the [Storage Provider Forecast Calculator](https://calc.filecoin.eu/) for more details.
+
+For more information and context see the following video:
+
+{% embed url="https://www.youtube.com/watch?v=zboAgawHT-o" %}
+
+It takes more variables than the cost vs. the income. In summary, the factors that influence your ROI are:
+
+* **Verified Deals:**
+
+ How much of your total sealed capacity will be done with Verified Deals (Filecoin Plus)? Those deals give a far higher return because of the 10x multiplier that is added to your storage power and block rewards.
+* **Committed Capacity:**
+
+ How much of your total sealed capacity will be just committed capacity (CC) sectors (sometimes also called pledged capacity)? These deals give a lower return compared to verified deals but are an easy way to get started in the network. Relying solely on this to generate income is challenging though, especially when the price of FIL is low.
+* **Sealing Capacity:**
+
+ How fast can you seal sectors? Faster sealing means you can start earning block rewards earlier and add more data faster. The downside is that it requires a lot of [hardware](../infrastructure/reference-architectures.md).
+* **Deal Duration:**
+
+  How long do you plan to run your storage provider? Are you taking short-term deals only, or are you in it for the long run? Taking long-term deals comes with an associated risk: if you can’t keep your storage provider online for the duration of the deals, you will get penalized. Short-term deals that require extension have the downside of higher operational costs to extend (which requires that the data be re-sealed).
+* **FIL Collateral pledged:**
+
+ A substantial amount of FIL is needed to start accepting deals in the Filecoin network. Verified deals require more pledged collateral than CC-deals. Although the collateral is not lost if you run your storage provider business well, it does mean an upfront investment (or lending).
+* **Hardware Investment:**
+
+ Sealing, storing, and proving the data does require a significant hardware investment as a storage provider. Although relying on services like [sealing-as-a-service](../architecture/sealing-as-a-service.md) can lower these requirements for you, it is still an investment in high-end hardware. Take the time to understand your requirements and your future plans so that you can invest in hardware that will support your business.
+* **Operational Costs:**
+
+ Last but not least there’s the ongoing monthly cost of operating the storage provider business. Both the costs for technical operations as well as business operations need to be taken into consideration.
diff --git a/storage-providers/filecoin-deals/snap-deals.md b/storage-providers/filecoin-deals/snap-deals.md
new file mode 100644
index 000000000..0a6649c84
--- /dev/null
+++ b/storage-providers/filecoin-deals/snap-deals.md
@@ -0,0 +1,21 @@
+---
+description: >-
+ Snap Deals are a way to convert Committed Capacity sectors (that store no real
+ data) into data sectors to be used for storing actual data and potentially
+ FIL+ data.
+---
+
+# Snap deals
+
+Instead of destroying a previously sealed sector and recreating a new sector that needs to be sealed, Snap Deals allow data to be ingested into CC-sectors without the requirement of re-sealing the sector.
+
+## Why would you do snap deals?
+
+There are two main reasons why a storage provider could be doing Snap Deals, also known as _“snapping up their sectors”_ in the Filecoin community:
+
+* The first reason is that the 10x storage power on the same volume of data stored is a strong incentive to upgrade to verified deals for those storage providers who started out on CC-sectors and wish to upgrade to verified deals with Filecoin Plus.
+* The second reason applies to storage providers who decide to start sealing CC-sectors, but then later fill them with verified deals. When you start as a storage provider or when you expand your storage capacity, it might be a good idea to fill your capacity with CC-sectors in the absence of verified deals. Not only do you start earning block rewards over that capacity, but more importantly, you can plan the sealing throughput, and balance your load over the available hardware. If your [sealing rate](../architecture/sealing-rate.md) is 3 TiB/day, it makes no sense to feed 5 TiB/day into the pipeline. This creates congestion and possibly negative performance. If you are sealing 3 TiB/day for 33 days in a row, you end up with 99 TiB of sealed sectors that were sealed evenly and consistently. If you then take on a 99 TiB verified deal (accounting for 1 PiB QAP), the only thing required is to snap up the sectors.
+
+Snapping up sectors with snap deals puts a lot less stress on the storage provider’s infrastructure. The only task that is executed from the [sealing pipeline](../architecture/sealing-pipeline.md) is the replica-update and prove-replica-update phase, which is similar to the PC2 process. The CPU-intensive PreCommit 1 phase is not required in this process.
+
+Do not forget to provide the collateral funds when snapping up a verified deal. The same volume requires more collateral when it counts as FIL+ data, namely 10x the collateral compared to raw storage power.
diff --git a/storage-providers/filecoin-deals/storage-deals.md b/storage-providers/filecoin-deals/storage-deals.md
new file mode 100644
index 000000000..9e59c052d
--- /dev/null
+++ b/storage-providers/filecoin-deals/storage-deals.md
@@ -0,0 +1,33 @@
+---
+description: >-
+ This page discusses what storage deals are, and how storage providers can
+ prepare for them.
+---
+
+# Storage deals
+
+The real purpose of Filecoin is to store humanity’s most important information. As a storage provider, that means accepting storage deals and storing deal sectors with real data in them. As before, those sectors are either 32 GiB or 64 GiB in size and require that the data be prepared as a content archive; that is, as a CAR file.
+
+## Data preparation
+
+Data preparation, which includes packaging files into size appropriate CAR files, is either done by a separate Data Preparer actor, or by storage providers acting as Data Preparers. The latter option is common for new storage providers, as they normally only have a few files that need preparation.
+
+Data preparation can be done in various ways, depending on your use-case. Here are some valuable sources of information:
+
+* [Filecoin Data Tools](https://docs.filecoindata.tools/about/) is a collection of tools for data preparation and deal making.
+* The [data-prep-tools repo](https://github.com/filecoin-project/data-prep-tools) has a set of CLI tools for more specific use-cases.
+* [Singularity](https://github.com/tech-greedy/singularity) is a command-line tool to put data into CAR files, create [CIDs](../../reference/general/glossary.md#content-identifier-cid), and even initiate deals with storage providers.
+
+See the following video for a demonstration on Singularity:
+
+{% embed url="https://www.youtube.com/watch?v=1ZjKxkI6-Ic" %}
+Xinan Xu's presentation on Singularity
+{% endembed %}
+
+## Deal Market
+
+In order for storage providers to accept deals and set their deal terms, they need to install some market software, such as [Boost](https://boost.filecoin.io/). This component interacts with data owners, accepts deals if they meet the configured requirements, gets a copy of the prepared data (CAR files), and puts it through the [sealing pipeline](../architecture/sealing-pipeline.md), after which it is in the state required to be proven to the network.
+
+The storage provider can (and should) keep unsealed data copies available for retrieval requests from the client. It is the same software component, Boost, that is responsible for HTTP retrievals from the client and for setting the price for retrievals.
+
+Many tools and platforms act as a deal making engine in front of Boost. This is the case for [Delta](https://docs.filecoindata.tools/about/delta-tech-stack/overview-of-delta-technology-stack) and [Spade](https://github.com/ribasushi/spade) for instance.
diff --git a/storage-providers/filecoin-deals/verified-deals.md b/storage-providers/filecoin-deals/verified-deals.md
new file mode 100644
index 000000000..23d2b1448
--- /dev/null
+++ b/storage-providers/filecoin-deals/verified-deals.md
@@ -0,0 +1,45 @@
+---
+description: >-
+ This page discusses what verified deals are, and how they can impact storage
+ providers.
+---
+
+# Verified deals
+
+Filecoin aims to be a decentralized storage network for humanity’s essential information. To achieve this, it’s crucial to add valuable data to the network. Filecoin Plus (Fil+) is a social trust program encouraging storage providers to store data in _verified deals_. A deal becomes _verified_ after the data owner (client) completes a verification process, where community _notaries_ assess the client’s use of Filecoin to determine its relevance and value to the Filecoin mission: storing and preserving humanity’s vital data. Notaries conduct due diligence by questioning clients and building reasonable confidence in their trustworthiness and use case.
+
+## DataCap
+
+Notaries are responsible for allocating a resource called _DataCap_ to clients with valuable storage use cases. DataCap is a non-exchangeable asset that is allocated by notaries to data clients. DataCap gets assigned to a wallet but cannot be sold or exchanged. The client can only spend the DataCap as part of making a verified deal with a storage provider. DataCap is a single-use credit, and a client’s DataCap balance is deducted based on the size of the data stored in verified deals.
+
+## Quality Adjusted Power (QAP)
+
+Storage providers are incentivized by the Filecoin network to store verified deals. A 10x quality adjustment multiplier is set at the protocol level for storage offered for verified deals. A 100 TiB dataset will account for 1 PiB of _Quality-Adjusted-Power_ (QAP). This means the storage provider has a larger share of storage power on the Filecoin network and will be more likely to get elected for WinningPoSt (see [Storage proving](../filecoin-economics/storage-proving.md)). The storage provider will earn 10x more block rewards for the same capacity made available to the network, if that capacity is storing verified deals.
+
+When storing real customer data and not simply [CC sectors](../../reference/general/glossary.md#capacity-commitment), a whole new set of responsibilities arises. A storage provider must have the capacity to make deals, to be able to obtain a copy of the data, to prepare the data for the network, prove the data on-chain via sealing, and last but not least, have a means to offer retrieval of the data to the client when requested.
+
+## Responsibilities
+
+As a storage provider, you play a crucial role in the ecosystem. Unlike miners in other blockchains, storage providers must do more than offer disk space to the network. Whether onboarding new customers to the network, or storing copies of data from other storage providers for clients seeking redundancy, providing storage can involve:
+
+* Business development.
+* Sales and marketing efforts.
+* Hiring additional personnel.
+* Networking.
+* Relationship building.
+
+Acquiring data copies requires systems and infrastructure capable of ingesting large volumes of data, sometimes up to a PiB. This necessitates significant internet bandwidth, with a minimum of 10 Gbps. For instance, transferring 1 PiB of data takes approximately 240 hours on a 10 Gbps connection. However, many large storage providers use up to 100 Gbps internet connections.
+
+Data preparation, which involves separating files and folders in CAR files, is time-consuming and requires expertise. You can delegate this task to a Data Preparer for a fee or assume the role yourself. Tools like [Singularity](https://singularity.storage/) simplify this process.
+
+Once the data is sealed and you are proving your copies on-chain (i.e. on the blockchain), you will need to offer retrievals to your customer as well. This obviously requires network bandwidth once more, so you may need to charge for retrievals accordingly.
+
+## Tools
+
+Tools and programs exist to support Fil+, but storage providers need to know how to operate this entire workflow. See [Filecoin Plus Programs](filecoin-programs.md) for more information on available programs. See [Architecture](../architecture/lotus-components.md) for more information on the tooling and software components.
+
+## Rewards & penalties
+
+With great power, comes great responsibility, which also counts for storage power: rewards on Fil+ deals are 10x, but so are the penalties. Because a sector of 32 GiB counts for 320 GiB of storage power (10x), the rewards and the penalties are calculated on the QAP of 320 GiB. Fil+ allows a storage provider to earn more block rewards on a verified deal, compared to a regular data deal. The 10x multiplier on storage power that comes with a verified deal, however, also requires 10x collateral from the storage provider.
+
+If the storage provider is then not capable of keeping the data and systems online and fails to submit the daily required proofs (WindowPoSt) for that data, the penalties (_slashing_) are also 10x higher than over regular data deals or CC sectors. Larger storage power means larger block rewards, larger collateral and larger slashing. The stakes are high - after all, we’re storing humanity’s most important information with Filecoin.
diff --git a/storage-providers/filecoin-economics/README.md b/storage-providers/filecoin-economics/README.md
new file mode 100644
index 000000000..467d41380
--- /dev/null
+++ b/storage-providers/filecoin-economics/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section discusses the economics of Filecoin in relation to storage
+ providers.
+---
+
+# Filecoin economics
+
diff --git a/storage-providers/filecoin-economics/block-rewards.md b/storage-providers/filecoin-economics/block-rewards.md
new file mode 100644
index 000000000..1fd577d27
--- /dev/null
+++ b/storage-providers/filecoin-economics/block-rewards.md
@@ -0,0 +1,61 @@
+---
+description: >-
+ This page describes block rewards in Filecoin, where storage providers are
+ elected to produce new blocks and earn FIL as rewards.
+---
+
+# Block rewards
+
+## What are block rewards?
+
+WinningPoSt (short for [Winning Proof of SpaceTime](https://spec.filecoin.io/algorithms/pos/post/)) is the cryptographic challenge through which storage providers are rewarded for their contributions to the network. At the beginning of each epoch (1 epoch = 30 seconds), a small number of storage providers are elected by the network to mine new [blocks](../../reference/general/glossary.md#block). Each elected storage provider who successfully creates a block is granted Filecoin tokens by means of a _block reward_. The amount of FIL per block reward varies over time and is listed on various blockchain explorers like [Filfox](https://filfox.info/en).
+
+The election mechanism of the Filecoin network is based on the “storage power” of the storage providers. A minimum of 10 TiB in storage power is required to be eligible for WinningPoSt, and hence to earn block rewards. The more storage power a storage provider has, the more likely they will be elected to mine a block. This concept becomes incredibly advantageous in the context of [Filecoin Plus verified deals](../../basics/how-storage-works/filecoin-plus.md).
+
+## Filecoin’s storage capacity
+
+The Filecoin network is composed of storage providers who offer storage capacity to the network. This capacity is used to secure the network, as it takes a significant amount of storage to take part in the consensus mechanism. This large capacity makes it impractical for a single party to reach 51% of the network power, since an attacker would need 10 EiB in storage to control the network. Therefore, it is important that the raw capacity, also referred to as _raw byte power_, remains high. The Filecoin spec also includes a _baseline power_ above which the network yields maximum returns for the storage providers.
+
+The graph below shows the evolution of network capacity on the Filecoin network. As can be seen, the baseline power goes up over time (and becomes exponential). This means from May 2021 to February 2023 the network yielded maximum returns for storage providers. However, in recent history, Quality Adjusted Power (QAP) has taken over as a leading indicator of relevance for the Filecoin network. QAP is the result of the multiplier when storing verified deals:
+
+
+
+Check out the Starboard dashboard for the most up-to-date [Network Storage Capacity](https://dashboard.starboard.ventures/capacity-services#network-storage-capacity).
+
+## Impact of storage capacity on block rewards
+
+As mentioned before, when the Raw Byte Power is above the Baseline Power, storage providers yield maximum returns. When building a business plan as a storage provider, it is important not to rely solely on block rewards. Block rewards are an incentive mechanism for storage providers. However, they are volatile and depend on the state of the network, which is largely beyond the control of storage providers.
+
+The amount of FIL that is flowing to the storage provider per earned block reward is based on a combination of simple minting and baseline minting. Simple minting is the minimum amount of FIL any block will always have, which is 5.5. Baseline minting is the extra FIL on top of the 5.5 that comes from how close the Raw Byte Power is to the Baseline Power.
+
+The below graph shows the evolution of FIL per block reward over time:
+
+
+
+There is a positive side to releasing less FIL per block reward too. As Filecoin has a capped maximum token supply of 2 billion FIL, the slower minting rate allows for minting over a longer period. A lower circulating supply also has a positive effect on the price of FIL.
+
+See the [Crypto Economics](../../basics/what-is-filecoin/crypto-economics.md) page of this documentation and the [Filecoin spec](https://spec.filecoin.io/#section-systems.filecoin\_token.minting\_model) for more information.
+
+## Filecoin’s storage capacity
+
+The Filecoin network is composed of storage providers who offer storage capacity to the network. This capacity is used to secure the network, as it takes a significant amount of storage to take part in the consensus mechanism. This large capacity makes it impractical for a single party to reach 51% of the network power, since an attacker would need 10 EiB in storage to control the network. Therefore, it is important that the raw capacity, also referred to as _raw byte power_, remains high. The Filecoin spec also includes a _baseline power_ above which the network yields maximum returns for the storage providers.
+
+The graph below shows the evolution of network capacity on the Filecoin network. As can be seen, the baseline power goes up over time (and becomes exponential). This means from May 2021 to February 2023 the network yielded maximum returns for storage providers. However, in recent history, Quality Adjusted Power (QAP) has taken over as a leading indicator of relevance for the Filecoin network. QAP is the result of the multiplier when storing verified deals:
+
+
+
+Check out the Starboard dashboard for the most up-to-date [Network Storage Capacity](https://dashboard.starboard.ventures/capacity-services#network-storage-capacity).
+
+## Impact of storage capacity on block rewards
+
+As mentioned before, when the Raw Byte Power is above the Baseline Power, storage providers yield maximum returns. When building a business plan as a storage provider, it is important not to rely solely on block rewards. Block rewards are an incentive mechanism for storage providers. However, they are volatile and depend on the state of the network, which is largely beyond the control of storage providers.
+
+The amount of FIL that is flowing to the storage provider per earned block reward is based on a combination of simple minting and baseline minting. Simple minting is the minimum amount of FIL any block will always have, which is 5.5. Baseline minting is the extra FIL on top of the 5.5 that comes from how close the Raw Byte Power is to the Baseline Power.
+
+The below graph shows the evolution of FIL per block reward over time:
+
+
+
+There is a positive side to releasing less FIL per block reward too. As Filecoin has a capped maximum token supply of 2 billion FIL, the slower minting rate allows for minting over a longer period. A lower circulating supply also has a positive effect on the price of FIL.
+
+See the [Crypto Economics](../../basics/what-is-filecoin/crypto-economics.md) page of this documentation and the [Filecoin spec](https://spec.filecoin.io/#section-systems.filecoin\_token.minting\_model) for more information.
diff --git a/storage-providers/filecoin-economics/committed-capacity.md b/storage-providers/filecoin-economics/committed-capacity.md
new file mode 100644
index 000000000..0fcfae384
--- /dev/null
+++ b/storage-providers/filecoin-economics/committed-capacity.md
@@ -0,0 +1,18 @@
+---
+description: >-
+ The content discusses participating in the network by providing Committed
+ Capacity (CC) sectors. CC sectors are storage sectors that are filled with
+ random data, instead of customer data.
+---
+
+# Committed capacity
+
+One way of participating in the Filecoin network is by providing [_Committed Capacity_ (CC) sectors](../../reference/general/glossary.md#capacity-commitment) to the network. CC sectors do not contain customer data but are filled with random data when they are created. The goal for the Filecoin network is to have a distributed network of verifiers and collaborators to the network in order to run and maintain a healthy blockchain. Any public blockchain network requires enough participants in the consensus mechanism of the blockchain, in order to guarantee that transactions being logged onto the blockchain are legitimate. Because Filecoin’s consensus mechanism is based on Proof-of-Storage, we need sufficient storage providers that pledge capacity to the network, and thus take part in the consensus process. This is done via Committed Capacity sectors. This can be done in sectors of 32 GiB or 64 GiB. For more detail, see the [architectural overview](../architecture/lotus-components.md).
+
+## Availability requirements
+
+Because the Filecoin network needs consistency, meaning all data stored is still available and unaltered, a storage provider is required to keep their capacity online, and be able to demonstrate to the network that the capacity is online. WindowPoSt verification is the process that checks that the provided capacity remains online. If not, a storage provider is penalized (or _slashed_) over the collateral FIL they provided for that capacity and their storage power gets reduced. This means an immediate reduction in capital (lost FIL), but also a reduction in future earnings because block rewards are correlated to storage power, as explained above. See [Slashing](slashing.md), [Storage Proving](storage-proving.md) and [FIL Collateral](fil-collateral.md) for more information.
+
+## What’s next?
+
+Providing committed capacity is the easiest way to get started as a storage provider, but the economics are very dependent on the price of FIL. If the price of FIL is low, it can be unprofitable to provide only committed capacity. The optimal FIL-price your business needs to be profitable will depend on your setup. Profitability can be increased by utilizing [Filecoin Plus](../../basics/how-storage-works/filecoin-plus.md), along with [extra services you can charge for](../filecoin-deals/auxiliary-services.md).
diff --git a/storage-providers/filecoin-economics/fil-collateral.md b/storage-providers/filecoin-economics/fil-collateral.md
new file mode 100644
index 000000000..cbb7afe86
--- /dev/null
+++ b/storage-providers/filecoin-economics/fil-collateral.md
@@ -0,0 +1,45 @@
+---
+description: >-
+ This page discusses the concept of collateral in Filecoin for storage
+ providers.
+---
+
+# FIL collateral
+
+As a storage provider on the network, you will have to create FIL wallets and add FIL to them. This is used to send messages to the blockchain but is also used for collateral. Providing storage capacity to the network requires you to provide FIL as collateral, which goes into a locked wallet on your Lotus instance. The [Lotus documentation](https://lotus.filecoin.io/storage-providers/operate/addresses/) details the process of setting up your wallets and funding wallets for the initial setup. Filecoin uses upfront token collateral, as in proof-of-stake protocols, proportional to the storage hardware committed. This gets the best of both worlds to protect the network: attacking the network requires both acquiring and running the hardware, but it also requires acquiring large quantities of the token.
+
+## Types of collateral
+
+To satisfy the varied collateral needs of storage providers in a minimally burdensome way, Filecoin includes three different collateral mechanisms:
+
+* _Initial pledge collateral_, an initial commitment of FIL that a miner must provide with each sector.
+* _Block rewards as collateral_, a mechanism to reduce the initial token commitment by vesting block rewards over time.
+* _Storage deal provider collateral_, which aligns incentives between storage provider and client and can allow storage providers to differentiate themselves in the market.
+
+For more detailed information about how collateral requirements are calculated, see the [miner collateral section in the Filecoin spec](https://spec.filecoin.io/systems/filecoin\_mining/miner\_collaterals/).
+
+When a storage provider fails to answer the WindowPoSt challenges within the 30-minute deadline (see [Storage Proving](storage-proving.md)), storage is taken offline, or any storage deal rules are broken, the provider is penalized against the provided collateral. This penalty is called [_slashing_](slashing.md) and means that a portion of the pledged collateral is forfeited to the `f099` address from your locked or available rewards, and your storage power is reduced. The `f099` address is the address where all burned FIL goes.
+
+## Commit Pledge
+
+The amount of required collateral depends on the amount of storage pledged to the Filecoin network. The bigger volume you store, the more collateral is required. Additionally, Filecoin Plus uses a [QAP](../../reference/general/glossary.md#quality-adjusted-storage-power) multiplier to increase the collateral requirement. See [Verified Deals with Filecoin Plus](../filecoin-deals/verified-deals.md) for more information.
+
+The formula for the required collateral is as follows:
+
+_Collateral needed for X TiB = (Current Sector Initial Pledge) x (32) x (X TiB)_
+
+For instance, for 100 TiB at 0.20 FIL / 32 GiB sector, this means:
+
+_0.20 FIL x 32 x 100 = 640 FIL_
+
+The “Current Sector Initial Pledge" can be found on blockchain explorers like [Filfox](https://filfox.info/en) and [Filscout](https://www.filscout.com/en) and on the [Starboard dashboards](https://dashboard.starboard.ventures/capacity-services#commit-pledge-per-32gib-qap).
+
+## Gas fees
+
+Another cost factor in the network is gas. Storage providers not only pledge collateral for the capacity they announce on-chain. The network also burns FIL in the form of gas fees. Most activity on-chain has some level of gas involved. For storage providers, this is the case for committing sectors.
+
+The gas fees fluctuate over time and can be followed on various websites like [FGas](https://fgas.io/).
+
+## FIL lending programs
+
+The ecosystem does have [FIL Lenders](https://filecoin-lending.com/read-more) who can provide you FIL (with interest) to get you started, which you can pay back over time and with the help of earned block rewards. Every lender, though, will still require you to supply up to 20% of the required collateral. The [Filecoin Virtual Machine](../../smart-contracts/fundamentals/the-fvm.md), introduced in March 2023, enables the creation of new lending mechanisms via smart contracts.
diff --git a/storage-providers/filecoin-economics/slashing.md b/storage-providers/filecoin-economics/slashing.md
new file mode 100644
index 000000000..fb03fa01e
--- /dev/null
+++ b/storage-providers/filecoin-economics/slashing.md
@@ -0,0 +1,20 @@
+---
+description: >-
+ Slashing penalizes storage providers that either fail to provide reliable
+ uptime or act maliciously against the network. This page discusses what
+ slashing means to storage providers.
+---
+
+# Slashing
+
+## Storage fault slashing
+
+This term encompasses a broad set of penalties which are to be paid by storage providers if they fail to provide sector reliability or decide to voluntarily exit the network. These include:
+
+* **Fault fees** are incurred for each day a storage provider’s sector is offline (fails to submit Proofs-of-Spacetime to the chain). Fault fees continue until the associated wallet is empty and the storage provider is removed from the network. In the case of a faulted sector, there will be an additional sector penalty added immediately following the fault fee.
+* **Sector penalties** are incurred for a faulted sector that was not declared faulted before a _WindowPoSt_ check occurs. The sector will pay a fault fee after a Sector Penalty once the fault is detected.
+* **Termination fees** are incurred when a sector is voluntarily or involuntarily terminated and is removed from the network.
+
+## Consensus fault slashing
+
+This penalty is incurred when committing consensus faults. This penalty is applied to storage providers that have acted maliciously against the network’s consensus functionality.
diff --git a/storage-providers/filecoin-economics/storage-proving.md b/storage-providers/filecoin-economics/storage-proving.md
new file mode 100644
index 000000000..e3a62a59b
--- /dev/null
+++ b/storage-providers/filecoin-economics/storage-proving.md
@@ -0,0 +1,11 @@
+# Storage proving
+
+Storage proving, known as _Proof-of-Spacetime_ (“PoSt”), is the mechanism that the Filecoin blockchain uses to validate that storage providers are continuously providing the storage they claim. Storage providers earn block rewards each time they successfully answer a PoSt challenge.
+
+## Proving deadlines
+
+As a storage provider, you must preserve the data for the duration of the [deal](../../reference/general/glossary.md#deal), which is an on-chain agreement between a client and a storage provider. As of March 2023, deals must have a minimum duration of 180 days, and maximum duration of 540 days. The latter value was chosen to balance long deal length with cryptographic security. Storage providers must be able to continuously prove the availability and integrity of the data they are storing. Every storage sector of 32 GiB or 64 GiB gets verified once in each 24 hour period. This period is called a _proving period_. Every proving period of 24 hours is broken down into a series of 30 minute, non-overlapping _deadlines_. This means there are 48 deadlines per day. Storage sectors are grouped in a _partition_, and assigned to a proving deadline. All storage sectors in a given partition will always be verified during the same deadline.
+
+## WindowPoSt
+
+The cryptographic challenge for storage proving is called _Window Proof-of-Spacetime_ (WindowPoSt). Storage providers have a deadline of 30 minutes to respond to this WindowPoSt challenge via a message on the blockchain containing a [zk-SNARK](https://en.wikipedia.org/wiki/Zero-knowledge\_proof) proof of the verified sector. Failure to submit this proof within the 30 minute deadline, or failure to submit it at all, results in _slashing_. Slashing means a portion of the [collateral](fil-collateral.md) will be forfeited to the f099 burn address and the _storage power_ of the storage provider gets reduced. Slashing is a way to penalize storage providers who fail to meet the agreed upon standards of storage.
diff --git a/storage-providers/infrastructure/README.md b/storage-providers/infrastructure/README.md
new file mode 100644
index 000000000..7f0056a71
--- /dev/null
+++ b/storage-providers/infrastructure/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers various infrastructure considerations that storage
+ providers should be aware of.
+---
+
+# Infrastructure
+
diff --git a/storage-providers/infrastructure/backup-and-disaster-recovery.md b/storage-providers/infrastructure/backup-and-disaster-recovery.md
new file mode 100644
index 000000000..4c9b21302
--- /dev/null
+++ b/storage-providers/infrastructure/backup-and-disaster-recovery.md
@@ -0,0 +1,58 @@
+---
+description: >-
+ This page covers the basics of backups and disaster recovery for storage
+ providers. A backup strategy is only as good as the last successful restore.
+---
+
+# Backup and disaster recovery
+
+It is crucial to have a backup of any production system. It is even more crucial to be able to restore from that backup. These concepts are vital to a Filecoin storage provider because not only are you storing customer data for which you have (on-chain) contracts, you have also pledged a large amount of collateral for that data.
+
+If you are unable to restore your Lotus miner and start proving your storage on-chain, you risk losing a lot of money. If you are unable to come back online in 6 weeks, you will lose **all** of your collateral, which will most likely lead to bankruptcy.
+
+As such it matters less what kind of backup you have, as long as you are able to restore from it fast.
+
+## High availability (HA) versus Disaster recovery (DR)
+
+It is a common misconception to assume you are covered against any type of failure by implementing a highly available (HA) setup. HA will protect against unplanned unavailability in many cases, such as a system failure. It will **not** protect you against data corruption, data loss, ransomware, or a complete disaster at the datacenter level.
+
+Backups and (tested) restores are the basis for a DR (disaster recovery) plan and should be a major point of attention for any Filecoin storage provider, regardless of your size of operation.
+
+## Recovery Time Objective (RTO) and Recovery Point Objective (RPO)
+
+When planning for backup and recovery, the terms RPO and RTO are important concepts to know about.
+
+* **Recovery Time Objective (RTO)** is the time taken to recover a certain application or dataset in the event of a failure. Fast recovery means a shorter RTO (typically measured in hours/minutes/seconds). Enterprises plan for very short RTOs when downtime is not acceptable to their business. Application and file system snapshots typically provide the lowest possible RTO.
+* **Recovery Point Objective (RPO)** is the last known working backup from which you can recover. A shorter RPO means the time between the last backup and the failure is short. Enterprises plan for very short RPOs for systems and data that changes very often (like databases). Synchronous replication of systems and data typically provides the lowest possible RPO.
+
+### RPO/RTO for storage providers
+
+Although ‘RPO zero’ and ‘RTO zero’ are the ideal, in practice it is rarely economical. DR planning requires compromises and if you are a storage provider you need to consider cost versus RPO.
+
+RTO is typically less concerning for storage providers. The most critical parts to recover are your sealed storage and your wallets. Wallet addresses typically do not change, so the only thing to worry about is your sealed storage. With storage level snapshots (such as ZFS snapshots), you can reduce your RTO to almost zero.
+
+For RPO, although synchronous replication, together with snapshots, can reduce RPO to nearly zero, that is not a cost-efficient solution. Asynchronous replication of sealed storage is the most viable option if you are running at small-to-medium scale. Once you grow beyond 10PB of storage, even replicating the data will become an expensive solution.
+
+In such cases you might want to look into _storage cluster_ solutions with built-in redundancy. Very large storage providers will operate [Ceph clusters](https://en.wikipedia.org/wiki/Ceph\_\(software\)) or other solutions with built-in _erasure coding_. Although this becomes more like an HA setup than a DR setup, at scale, it becomes the only economically viable option.
+
+Running a storage cluster comes with its own operational challenges though, which does not make this a good fit for small-to-medium setups.
+
+### RPO/RTO for customers
+
+Both storage providers and data owners (customers) should look at RPO and RTO options. As a customer, you can achieve HA/DR by having multiple copies of your data stored (and proven) across multiple storage providers. In the event of data loss at one provider, other providers will hold a copy of your data from which you can retrieve. As a customer, you choose how much redundancy you need, by doing storage deals with more providers.
+
+RTO for data owners is a matter of how fast the storage provider(s) can provide you the data.
+
+* Do your storage providers offer “fast retrieval” of the data through unsealed copies? If not, the unsealing process (typically multiple hours) must be calculated into the RTO.
+* Do your storage providers offer retrieval through [Saturn, (the Web3 CDN)](https://saturn.tech) for ultra-fast retrieval?
+* Do your storage providers pin your data on IPFS, in addition to storing it on Filecoin?
+
+RPO for data owners is less of a concern, especially once the data is sealed. The Filecoin blockchain will enforce availability and durability of the data being stored, once it is sealed. It is therefore important, as a data owner, to know how fast your storage provider can prove the data on-chain.
+
+## Backup techniques
+
+* A first level of protection comes from ZFS (if you are using ZFS as the file system for your storage). Having ZFS snapshots available protects you against data loss caused by human error or tech failure, and potentially even against ransomware. Other file systems typically also have a way to make snapshots, albeit not as efficiently as ZFS.
+* A second level of defense comes from a dedicated backup system. Not only should you have backup storage (on a different storage array than the original data), you also need to have a backup server that can at a minimum run the Lotus daemon, Lotus miner and 1 WindowPoSt worker (note: this requires a GPU). With that you can sync the chain, offer retrievals and prove your storage on-chain, from your backup system, whilst you bring your primary back online.
+* An alternative technique to having a dedicated backup system and copy is to have a storage cluster. This still requires a backup system to run the Lotus daemon, Lotus miner and WindowPoSt worker on. Implementing a storage cluster is usually only done for large-scale deployments as it comes with additional operational tasks.
+
+For maximum resilience, you could host your backup system (server + storage) in a different datacenter than your primary system.
diff --git a/storage-providers/infrastructure/network.md b/storage-providers/infrastructure/network.md
new file mode 100644
index 000000000..9d005926d
--- /dev/null
+++ b/storage-providers/infrastructure/network.md
@@ -0,0 +1,32 @@
+---
+description: >-
+ This page covers topics related to internet bandwidth requirements, LAN
+ bandwidth considerations, the use of VLANs for network traffic separation,
+ network redundancy measures, and common topologies.
+---
+
+# Network
+
+## Internet bandwidth
+
+The amount of internet bandwidth required for a network largely depends on the size of the organization and customer expectations. A bandwidth between 1 Gbps and 10 Gbps is generally sufficient for most organizations, but the specific requirements should be determined based on the expected traffic. A minimum bandwidth of 10 Gbps is recommended for setups that include a [Saturn](https://saturn.tech) node. Saturn requires a high-speed connection to handle large amounts of data.
+
+## LAN bandwidth
+
+The bandwidth between different components of a network is also important, especially when transferring data between servers. The internal connectivity between servers should be at least 10 Gbps to ensure that planned sealing capacity is not limited by network performance. It is important to ensure that the servers and switches are capable of delivering the required throughput, and that firewalls are not the bottleneck for this throughput.
+
+## VLANs
+
+Virtual Local Area Networks (VLANs) are commonly used to separate network traffic and enhance security. However, if firewall rules are implemented between VLANs, the firewall can become the bottleneck. To prevent this, it is recommended to keep all sealing workers, Lotus miners, and storage systems in the same VLAN. This allows for data access and transfer without involving routing and firewalls, thus improving network performance.
+
+## Redundancy
+
+Network redundancy is crucial to prevent downtime and ensure uninterrupted operations. By implementing redundancy, individual networking components can fail without disrupting the entire network. Common industry standards for network redundancy include NIC (network interface card) bonding, LACP (Link Aggregation Control Protocol), or MCLAG (Multi-Chassis Link Aggregation Group).
+
+## Common topologies
+
+Depending on the size of the network, different network topologies may be used to optimize performance and scalability. For larger networks, a spine-leaf architecture may be used, while smaller networks may use a simple two-tier architecture.
+
+Spine-leaf architectures provide predictable latency and linear scalability by having multiple L2 leaf switches that connect to the spine switches. On the other hand, smaller networks can be set up with redundant L3 switches or a collapsed spine/leaf design that connect to redundant routers/firewalls.
+
+It is important to determine the appropriate topology based on the specific needs of the organization.
diff --git a/storage-providers/infrastructure/reference-architectures.md b/storage-providers/infrastructure/reference-architectures.md
new file mode 100644
index 000000000..47dfbee10
--- /dev/null
+++ b/storage-providers/infrastructure/reference-architectures.md
@@ -0,0 +1,109 @@
+---
+description: >-
+ This page contains some reference architectures that storage providers can use
+ to build out their infrastructure.
+---
+
+# Reference architectures
+
+## 1 PiB raw architecture
+
+1 PiB raw reference architecture.
+
+The following reference architecture is designed for 1 PiB of raw sectors or raw data to be stored. Let’s discuss the various design choices of this architecture.
+
+### Virtual machines
+
+* 32 CPU Cores
+* 512 GB RAM
+* 8x 2 TB SSD storage
+* 2x 10 GbE ethernet NICs
+
+Lotus daemon and Boost run as Virtual Machines in this architecture. The advantages of virtualization are well-known, including easy reconfiguration of parameters (CPU, memory, disk) and portability. The daemon is not a very intensive process by itself, but must be available at all times. We recommend having a second daemon running as another VM or on backup infrastructure to which you can fail over.
+
+Boost is a resource-intensive process, especially when deals are being ingested over the internet. It also feeds data payload of the deals into the Lotus miner.
+
+We recommend 12-16 cores per VM and 128 GiB of memory. Lotus daemon and Boost need to run on fast storage (SSD or faster). The capacity requirements of Boost depend on the size of deals you are accepting as a storage provider. Its capacity must be sufficient to be landing space for deals until the data can be processed by your sealing cluster in the backend.
+
+Both Lotus daemon and Boost require public internet connectivity. In the case of Boost you also need to consider bandwidth. Depending on the deal size you are accepting, you might require 1 Gbps or 10 Gbps internet bandwidth.
+
+### Lotus miner
+
+* 16 CPU Cores
+* 256 GB RAM
+* 2x 1TB SSD storage
+* 2x 10 GbE ethernet NICs
+
+Lotus miner becomes a less intensive process with dedicated PoST workers separated from it (as in this design). If you use a dedicated storage server or NAS system as the storage target for your sealed and unsealed sectors, Lotus miner eventually could also become a VM. This requires additional CPU and memory on the hypervisor host.
+
+We opted for a standalone Lotus miner in this design and gave it 256 GiB of memory. This is because we operate ZFS at the storage layer, which requires a lot of memory for caching. Lotus miner needs only 128 GiB of memory when you opt for a dedicated storage server or NAS system for your storage.
+
+### SATA Storage
+
+In this architecture we have attached storage shelves to the Lotus miner with 2.4 PiB of usable capacity. This is the capacity after the creation of a RAIDZ2 file system (double parity). We recommend vdevs of 12 disks wide. In RAIDZ2 this results in 10 data disks and 2 parity disks. Storage systems also don’t behave well at 100% used capacity, so we designed for 20% extra capacity.
+
+### PoST workers
+
+* 16 CPU Cores
+* 128 GB RAM
+* 2x 1TB SSD storage
+* 1x GPU 10+ GB memory, 3500+ CUDA cores
+* 2x 10 GbE ethernet NICs
+
+We have split off the Winning PoST and Window PoST tasks from the Lotus miner. Using dedicated systems for those processes increases the likelihood of winning block rewards and reduces the likelihood of missing a proving deadline. For redundancy, you can run a standby WindowPoSt worker on the WinningPoSt server and vice versa.
+
+PoST workers require 128 GiB of memory at the minimum and require a capable GPU with 10GB of memory and 3500 or more CUDA cores.
+
+### Sealing workers
+
+The sealing workers require the most attention during the design of a solution. Their performance will define the sealing rate of your setup, and hence, how fast you can onboard client deals.
+
+Keep in mind that using [Sealing-as-a-Service](../architecture/sealing-as-a-service.md) reduces the requirements to have a fast performing sealing setup. In this design, however, we plan for an on-premise sealing setup of maximum 7 TiB/day. This theoretical sealing capacity is based on the entire sealing setup running at full speed for 24 hrs/day.
+
+**AP / PC1 worker**
+
+* 32 CPU Cores **with SHA-extensions**
+* 1 TB RAM
+* 2x 1TB SSD OS storage
+* 15+ TB U.3 NVMe sealing / scratch storage
+* 2x 10 GbE (or faster) ethernet NICs
+
+We put the AddPiece and PreCommit1 tasks together on a first worker. This makes sense because AddPiece prepares the scratch space that will be used by the PC1 tasks thereafter. The first critical hardware component for PC1 is the CPU. This must be a CPU with SHA-256 extensions. Most storage providers opt for AMD Epyc (Rome, Milan or Genoa) processors, although Ice Lake and newer Intel Xeon processors also support these extensions.
+
+To verify if your CPU has the necessary extensions, run:
+
+```shell
+cat /proc/cpuinfo | grep --color sha_ni
+```
+
+PC1 is a single-threaded process so we require enough CPU cores to run multiple PC1 tasks in parallel. This reference architecture has 32 cores in a PC1, which would allow for \~30 parallel PC1 processes.
+
+For this, we also need 1TB of memory in the PC1 server.
+
+Every PC1 process requires approximately 450 GiB of sealing scratch space. This scratch space is vital to the performance of the entire sealing setup. It requires U.2 or U.3 NVMe media. For 30 parallel PC1 processes we then need \~15 TiB of scratch space. RAID protection on this volume is not mandatory, however losing 30 sectors during sealing and having to start over does have an impact on your sealing rate.
+
+**PC2 / C1 / C2 workers**
+
+* 32 CPU Cores
+* 512 GB RAM
+* 2x 1TB SSD
+* 1x GPU 10+ GB memory, 3500+ CUDA cores
+* 2x 10 GbE (or faster)
+
+The next step in the sealing pipeline is PreCommit2 (PC2). You could decide to keep it together with PC1, but given the size of our setup (1 PiB) and the likely requirement to scale beyond that later, we split off PC2 in this architecture.
+
+We plan for twice the amount of PC2 workers compared to PC1, as explained under [sealing rate](../architecture/sealing-rate.md). Apart from the memory requirements this process specifically requires a capable GPU with preferably 24GB of memory and 6000 or more CUDA cores.
+
+The scratch space contents from PC1 are copied over to the PC2 worker. This PC2 worker also requires fast NVMe scratch space. Since we plan for 2 PC2 workers against 1 PC1 worker, the capacity of the scratch space per PC2 worker is half of the total scratch space capacity of the PC1 worker, 8 TiB in our case.
+
+C1 doesn’t require much attention for our architecture. C2 however requires a capable GPU again.
+
+## Solo storage providing
+
+Please take a look at the presentation Benjamin Hoejsbo from [PIKNIK](https://www.piknik.com) gave, in which solo storage provider setups are examined. The presentation is from 2022, but the content is still relevant as of March 2023.
+
+{% embed url="https://www.youtube.com/watch?v=LKMjCgo-fkA" %}
+
+{% hint style="info" %}
+We are working to improve this section. If you would like to share your mining setup, please create an issue in the [Filecoin documentation GitHub repository](https://github.com/filecoin-project/filecoin-docs/issues)!
+{% endhint %}
diff --git a/storage-providers/infrastructure/storage.md b/storage-providers/infrastructure/storage.md
new file mode 100644
index 000000000..14cbb5ed8
--- /dev/null
+++ b/storage-providers/infrastructure/storage.md
@@ -0,0 +1,55 @@
+---
+description: >-
+ This page covers RAID configurations, performance implications and
+  availability, I/O behavior for sealed and unsealed sectors, and read/write
+ performance considerations.
+---
+
+# Storage
+
+## RAID configurations
+
+Storage systems use RAID for protection against data corruption and data loss. Since cost is an important aspect for storage providers, and you are dealing with cold storage mostly, you will be opting for SATA disks in RAID configurations that favor capacity (and read performance). This leads to RAID5, RAID6, RAIDZ and RAIDZ2. Double parity configurations like RAID6 and RAIDZ2 are preferred.
+
+The _width of a volume_ is defined by how many spindles (SATA disks) there are in a RAID group. Typical configurations range between 10+2 and 13+2 disks in a group (in a VDEV in the case of ZFS).
+
+## RAID implications
+
+Although RAIDZ2 provides high fault tolerance, configuring wide VDEVs also has an impact on performance and availability. ZFS performs an automatic healing task called _scrubbing_ which performs a checksum validation over the data and recovers from data corruption. This task is I/O intensive and might get in the way of other tasks that should get priority, like _storage proving_ of sealed sectors.
+
+Another implication of large RAID sets that gets aggravated with very large capacity per disk is the time it takes to rebuild. Rebuilding is the I/O intensive process that takes place when a disk in a RAID group is replaced (typically after a disk failed). If you choose to configure very wide VDEVs while using very large spindles (20TB+) you might experience very long rebuild times which again get in the way of high priority tasks like storage proving.
+
+It is possible though to configure wider VDEVs (RAID groups) for the unsealed sectors. Physically separating sealed and unsealed copies has other advantages, which are explained in [Custom Storage Layout](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/).
+
+## I/O Behavior
+
+Storage providers keep copies of sealed sectors and unsealed sectors (for fast retrieval) on their storage systems. However the I/O behavior on sealed sectors is very different from the I/O behavior on unsealed sectors. When [storage proving](../filecoin-economics/storage-proving.md) happens only a very small portion of the data is read by WindowPoSt. A large storage provider will have many sectors in multiple partitions for which WindowPoSt requires fast access to the disks. This is unusual I/O behavior for any storage system.
+
+The unsealed copies are used for fast retrieval of the data towards the customer. Large datasets in chunks of 32 GiB (or 64 GiB depending on the configured sector size) are read.
+
+In order to avoid different tasks competing for read I/O on disk it is recommended to create separate disk pools with their own VDEVs (when using ZFS) for sealed and unsealed copies.
+
+## Write performance
+
+Write access towards the storage also requires your attention. Depending on how your storage array is connected (SAS or Ethernet) you will have different transfer speeds towards the sealed storage path. At a sealing capacity of 6 TiB/day you will effectively be writing 12 TiB/day towards the storage (6 TiB sealed, 6 TiB unsealed copies). Both your storage layout and your network need to be able to handle this.
+
+If this 12 TiB were equally spread across the 24 hrs of a day, this would already require 1.14 Gbps.
+
+> 12 TiB \* 1024 / 24 hr / 3600 sec \* 8 = 1.14 Gbps
+
+The sealing pipeline produces 32 GiB sectors (64 GiB depending on your configured sector size) which are written to the storage. If you configured _batching_ of the commit messages (to reduce total gas fees) then you will write multiple sectors towards disk at once.
+
+A minimum network bandwidth of 10 Gbps is recommended and write cache at the storage layer will be beneficial too.
+
+## Read performance
+
+Read performance is optimal when choosing for RAIDZ2 VDEVs of 10 to 15 disks. RAID-sets using parity like RAIDZ and RAIDZ2 will employ all spindles for read operations. This means read throughput is a lot better compared to reading from a single or a few spindles.
+
+There are 2 types of read operations that are important in the context of Filecoin:
+
+* random read I/O:
+
+ When storage proving happens, a small portion of a sector is read for proving.
+* sequential read I/O:
+
+    When a retrieval happens, entire sectors are read from disk and streamed towards the customer via Boost.
diff --git a/storage-providers/skills/README.md b/storage-providers/skills/README.md
new file mode 100644
index 000000000..c914f8a89
--- /dev/null
+++ b/storage-providers/skills/README.md
@@ -0,0 +1,8 @@
+---
+description: >-
+ This section covers the technical skills and knowledge required to become a
+ storage provider.
+---
+
+# Skills
+
diff --git a/storage-providers/skills/industry.md b/storage-providers/skills/industry.md
new file mode 100644
index 000000000..cedb7d97e
--- /dev/null
+++ b/storage-providers/skills/industry.md
@@ -0,0 +1,26 @@
+---
+description: >-
+ This content covers the importance of understanding and meeting specific
+ requirements, certifications, and compliance standards when working with
+ customers in certain industries.
+---
+
+# Industry
+
+When working with customers from certain industries, it is important to understand that specific requirements may apply. This can include certifications and compliance standards that are necessary to meet regulatory and legal obligations. Some examples of such standards include:
+
+**HIPAA**: This standard applies to the handling of medical data and is essential for healthcare providers and organizations.
+
+**SOC2**: This standard applies to service providers and is used to ensure that they have adequate controls in place to protect sensitive data.
+
+**PCI-DSS**: This standard applies to businesses that handle payments and ensures that they have adequate security measures in place to protect payment card data. [PCI-DSS](https://en.wikipedia.org/wiki/Payment\_Card\_Industry\_Data\_Security\_Standard)
+
+**SOX**: This standard applies to businesses operating in the financial sector and is used to ensure that they have adequate controls in place to protect against fraud and financial misconduct.
+
+**GDPR**: This standard applies to businesses that store personally identifiable information (PII) for European customers and is used to ensure that customer data is protected in accordance with European data privacy regulations.
+
+**Local regulations**: These regulations can vary per country and are especially important to consider when doing business with government agencies.
+
+**ISO 27001**: This is a security standard that provides a framework for establishing, implementing, maintaining, and continually improving an information security management system.
+
+Having one or more of these certifications can demonstrate to customers that you have the necessary skills and expertise to handle their data and meet their regulatory requirements. This can be a valuable asset for businesses looking to work with customers in specific industries, as it can provide a competitive edge and help attract new customers. Therefore, it is important for storage providers to stay informed about industry-specific requirements and obtain relevant certifications as necessary.
diff --git a/storage-providers/skills/linux.md b/storage-providers/skills/linux.md
new file mode 100644
index 000000000..7be64c697
--- /dev/null
+++ b/storage-providers/skills/linux.md
@@ -0,0 +1,77 @@
+---
+description: >-
+  This page covers the importance of understanding the Linux operating system
+ including installation, configuration, environment variables, performance
+ optimization, and performance analysis.
+---
+
+# Linux
+
+Becoming a storage provider requires a team with a variety of skills. Of all the technical skills needed to run a storage provider business, storage knowledge is important, but arguably, it is even more important to have deep understanding of the Linux operating system.
+
+Where most enterprise storage systems (NAS, SAN and other types) do not require the administrator to have hands-on Linux experience, Filecoin does require a lot more knowledge about Linux. For starters, this is because Filecoin is not just a storage system. It is a blockchain platform that offers decentralized storage. As a storage provider, you must ensure that your production system is always available, not just providing the storage.
+
+## Ubuntu Server LTS
+
+Although Lotus also runs on Mac, production systems generally all run on Linux. More specifically, most storage providers run on Ubuntu. Any Linux distribution should be possible but running Ubuntu makes it easier to find support in the community. Every distribution is a bit different and knowing that all components have been built and tested on Ubuntu, and knowing you have the same OS variables in your environment as someone else, lowers the barrier to starting as a storage provider significantly. Go for Ubuntu Server and choose the latest **LTS** version.
+
+Install Ubuntu LTS as a **headless server**. This means there is no desktop environment or GUI installed. It requires you to do everything on the command line. Not having a desktop environment on your server(s) has multiple advantages:
+
+* It reduces the attack surface of your systems. Fewer packages installed means fewer patches and updates, but more importantly, fewer potential vulnerabilities.
+* As you will be running several tasks on GPU (see [Reference Architectures](../infrastructure/reference-architectures.md)), it’s best to avoid running a desktop environment, which might compete for resources on the GPU.
+
+Exclude the `nvidia-drivers` and `cuda` packages from your updates using [the appropriate command](https://tecadmin.net/exclude-packages-from-apt-upgrade/) set. Once you have a working setup for your specific GPU, you will want to test these packages before you risk breaking them. Many storage providers may need to [install CUDA](https://linux.how2shout.com/how-to-install-cuda-on-ubuntu-20-04-lts-linux/) since some operating systems do not include this package by default.
+
+## Command-line and environment variables
+
+All installation tasks and operational activities happen from the CLI. When installing and upgrading Lotus, it is recommended to build the binaries from source code. Upgrades to Lotus happen every two months or so. If you are unable to perform a mandatory Lotus upgrade, you may become disconnected from the Filecoin network, which means you could be penalized and lose money, so it’s vital to keep Lotus up-to-date.
+
+Configuration parameters for the Lotus client are stored in 2 places:
+
+* into `config.toml` files in `~/.lotus`, `~/.lotusminer` and `~/.lotusworker`
+* into environment variables in `~/.bashrc` if you are using Bash as your shell
+
+Configuration parameters, and most environment variables, are covered in the [Lotus documentation](https://lotus.filecoin.io/storage-providers/setup/configuration/). More specific environment variables around performance tuning can be found on the [Rust FIL Proofs](https://github.com/filecoin-project/rust-fil-proofs) repository on GitHub.
+
+## Linux performance optimization
+
+### Scheduler
+
+Some storage providers fine-tune their setups by enabling CPU-core-pinning of certain tasks (especially PC1), as a starting storage provider it’s not necessary to do that level of tuning. It is essential, however, to have some level of understanding of the [Linux kernel scheduler](https://www.kernel.org/doc/html/latest/scheduler/index.html) to know how to prioritize and deprioritize other tasks in the OS. In the case of Lotus workers you certainly want to prioritize the `lotus-worker` process(es).
+
+### Configuring open file limits
+
+Lotus needs to open a lot of files simultaneously, and it is necessary to reconfigure the OS to support this.
+
+This is one of the examples where not every Linux distribution is the same. On Ubuntu, run the following commands:
+
+```shell
+sudo echo "* soft nofile 32000000" >> /etc/security/limits.conf
+sudo echo "* hard nofile 128000000" >> /etc/security/limits.conf
+sudo echo "fs.nr_open=128000000" >> /etc/sysctl.conf
+sudo echo "fs.file-max=128000000" >> /etc/sysctl.conf
+sudo sysctl -p
+```
+
+### Performance analysis
+
+Diagnosing performance bottlenecks on a system is vital to keeping a well balanced [sealing pipeline](../architecture/sealing-pipeline.md).
+
+There are many good resources to check out when it comes to Linux performance troubleshooting. Brendan Gregg’s [Linux performance analysis in 60 seconds](https://netflixtechblog.com/linux-performance-analysis-in-60-000-milliseconds-accc10403c55) is an excellent introduction. Each one of these commands deserves a chapter on its own but can be further researched in their man pages.
+
+{% embed url="https://www.youtube.com/watch?v=ZdVpKx6Wmc8" %}
+
+The commands used are:
+
+```shell
+uptime
+dmesg | tail
+vmstat 1
+mpstat -P ALL 1
+pidstat 1
+iostat -xz 1
+free -m
+sar -n DEV 1
+sar -n TCP,ETCP 1
+top
+```
diff --git a/storage-providers/skills/network.md b/storage-providers/skills/network.md
new file mode 100644
index 000000000..ce3b2af02
--- /dev/null
+++ b/storage-providers/skills/network.md
@@ -0,0 +1,54 @@
+---
+description: >-
+ This page covers the importance of network skills for a storage provider
+ setup, including network architecture, monitoring, security, infrastructure
+ components, and performance optimizations.
+---
+
+# Network
+
+Network skills are crucial for building and maintaining a well-functioning storage provider setup. The network architecture plays a vital role in the overall performance of the storage system. Without a proper network architecture, the system can easily become bogged down and suffer from poor performance.
+
+To ensure optimal performance, it is essential to understand where the bottlenecks in the network setup are. This requires a good understanding of network topology, protocols, and hardware. It is also important to be familiar with network monitoring tools that can help identify performance issues and optimize network traffic.
+
+In **addition**, knowledge of security protocols and best practices is essential for protecting the storage provider setup from unauthorized access, data breaches, and other security threats. Understanding network security principles can help ensure the integrity and confidentiality of data stored on the network.
+
+**Overall**, network skills are essential for building a high-performing, well-balanced storage provider setup. A solid understanding of network architecture, topology, protocols, and security principles can help optimize performance, prevent bottlenecks, and protect against security threats.
+
+For example, a storage provider setup may have multiple servers that are connected to a network. If the network architecture is not designed properly, data transfer between the servers can become slow and cause delays. This can lead to poor performance and frustrated users. By understanding network architecture and designing the network properly, such bottlenecks can be avoided.
+
+**Monitoring** the network is also crucial in identifying potential performance issues. Network monitoring tools can provide insights into network traffic patterns, bandwidth usage, and other metrics that can be used to optimize performance. Monitoring the network can help identify bottlenecks and areas where improvements can be made.
+
+**Network security** is another important consideration for storage provider setups. A network that is not properly secured can be vulnerable to unauthorized access, data breaches, and other security threats. Network security principles such as firewalls, encryption, and access control can be used to protect the storage provider setup from these threats.
+
+**In summary**, network skills are essential for building and maintaining a high-performing storage provider setup. A solid understanding of network architecture, topology, protocols, and security principles can help optimize performance, prevent bottlenecks, and protect against security threats. Monitoring the network is also crucial in identifying potential issues and ensuring smooth data flow.
+
+## Network infrastructure
+
+Network infrastructure, including switches, routers, and firewalls, plays a crucial role in the performance, reliability, and security of any network. Having the right infrastructure in place is essential to ensuring smooth and seamless network connectivity.
+
+**Switches** are essential for connecting multiple devices within a network. They direct data traffic between devices on the same network, allowing for efficient communication and data transfer. Switches come in a variety of sizes and configurations, from small desktop switches for home networks to large modular switches for enterprise networks. Choosing the right switch for your network can help ensure optimal performance and reliability.
+
+**Routers**, on the other hand, are responsible for connecting different networks together. They enable communication between devices on different networks, such as connecting a home network to the internet or connecting multiple offices in a business network. Routers also provide advanced features such as firewall protection and traffic management to help ensure network security and optimize network performance.
+
+**Firewalls** act as a first line of defense against external threats. They filter traffic coming into and out of a network, blocking malicious traffic and allowing legitimate traffic to pass through. Firewalls come in various forms, from hardware firewalls to software firewalls, and can be configured to block specific types of traffic or restrict access to certain parts of the network.
+
+When it comes to network infrastructure, it’s important to choose switches, routers, and firewalls that are reliable, efficient, and secure. This means taking into account factors such as network size, bandwidth requirements, and security needs when selecting infrastructure components.
+
+In addition to choosing the right components, it’s also important to properly configure and maintain them. This includes tasks such as setting up VLANs, implementing security features such as access control lists (ACLs), and regularly updating firmware and software to ensure optimal performance and security.
+
+**In summary**, network infrastructure, including switches, routers, and firewalls, is essential for building a reliable and secure network. Whether you are building a small home network or a large-scale enterprise network, investing in the right infrastructure components and properly configuring and maintaining them can help ensure optimal network performance, reliability, and security.
+
+## Performance
+
+Performance is a critical aspect of a storage provider setup, particularly when dealing with high network throughput requirements between multiple systems. To ensure optimal performance, it is important to use network benchmarking tools such as iperf and iperf3. These tools make it easy to test network throughput and identify bottlenecks in the network setup.
+
+By using iperf or iperf3, you can determine the maximum network throughput between two systems. This can help you identify potential performance issues, such as network congestion or insufficient bandwidth. By running network benchmarks, you can also determine the impact of changes to the network setup, such as adding or removing hardware components.
+
+As we are dealing with high network throughput requirements between multiple systems (to and from Boost, between the PC1 and PC2 workers and from PC2 to lotus-miner) it is worth learning to work with [`iperf` and `iperf3`](https://iperf.fr), which allow for easy network benchmarking.
+
+As a storage provider, you also need to make trade-offs between performance and cost. Higher bandwidth networks typically offer better performance but come with a higher cost. Therefore, you need to perform calculations to determine whether investing in a higher bandwidth network is worth the cost.
+
+**For example**, if your storage provider setup requires high network throughput, but your budget is limited, you may need to prioritize certain network components, such as switches and network cards, over others. By analyzing the performance impact of each component and comparing it to the cost, you can make informed decisions about which components to invest in.
+
+**In summary**, performance is a critical aspect of a storage provider setup, particularly when dealing with high network throughput requirements. Network benchmarking tools such as iperf and iperf3 can help identify potential performance issues and optimize the network setup. To make informed decisions about the network setup, you also need to make trade-offs between performance and cost by analyzing the impact of each component and comparing it to the cost.
diff --git a/storage-providers/skills/sales.md b/storage-providers/skills/sales.md
new file mode 100644
index 000000000..a7ebfd18a
--- /dev/null
+++ b/storage-providers/skills/sales.md
@@ -0,0 +1,40 @@
+---
+description: >-
+ This content covers the business and commercial aspects of running a storage
+ provider business.
+---
+
+# Sales
+
+Running a storage provider business is not just about having technical expertise and providing storage services. It is also about building and maintaining relationships with clients, negotiating contracts, and managing finances effectively. A storage provider must be able to communicate the value of their services to potential clients, as well as ensure that current clients are satisfied and receive the support they need.
+
+**Sales skills** are important for storage providers to differentiate themselves from the competition, market their services effectively, and attract new customers. This requires an understanding of the market, the needs of potential clients, and how to tailor their services to meet those needs. Storage providers should also be able to identify opportunities for growth and expansion, and have a strategy in place for pursuing those opportunities.
+
+In addition to sales skills, financial management skills are also crucial for running a successful storage provider business. This includes budgeting, forecasting, and managing cash flow effectively. It is important for storage providers to understand the costs associated with providing their services, and to price their services appropriately in order to generate revenue and cover their expenses.
+
+Overall, sales skills are essential for storage providers to succeed in a competitive market. By combining technical expertise with strong business and commercial skills, storage providers can build a successful and sustainable business.
+
+## Business aspects
+
+Running a storage provider business involves several business aspects that require careful attention to ensure long-term success. The first and most obvious aspect is investment in hardware and FIL as collateral. Hardware is the backbone of any storage provider’s business, and ensuring that you have the right equipment to provide reliable and high-performance storage is critical. Additionally, FIL is the primary currency used within the Filecoin network, and as a storage provider, you need to ensure that you have a sufficient amount of FIL as collateral to cover your storage deals.
+
+As your business grows, the amount of hardware and FIL needed will increase, and it is important to have a clear plan for scaling your business. This involves not only investing in additional hardware and FIL but also managing operational costs such as electricity, cooling, and maintenance. Having a skilled business team that can manage and plan for these costs is essential.
+
+Another important aspect of running a storage provider business is managing your relationships with investors, venture capitalists, and banks. These organizations can provide much-needed funding to help grow your business, but they will only invest if they are confident in your ability to manage your business effectively. This means having a strong business plan, a skilled team, and a clear strategy for growth.
+
+In summary, the business aspects of running a storage provider business are critical to its success. This involves managing investments in hardware and FIL, planning for scalability and managing operational costs, and building strong relationships with investors, venture capitalists, and banks.
+
+## Commercial aspects
+
+A storage provider needs to get storage deals to grow their network power and to earn money. There are at least 2 ways to get storage deals, each one requiring specific sales skills.
+
+* Obtaining data replicas from other storage providers and programs:
+
+ Filecoin data programs specify the minimum amount of replicas for a deal. Programs like Slingshot and platforms like Estuary require a minimum of 6 replicas per deal. This means deals need to be stored across multiple storage providers in the ecosystem, so you can work with peers in the network to share clients’ data replicas.
+
+ Working in the ecosystem and building connections with other storage providers takes time and effort, and is essentially a sales activity.
+* Onboarding your own customers:
+
+Acquiring your own customers, and bringing their data onto the Filecoin network, requires business development skills and people on your team who actively work with data owners (customers) to educate them about the advantages of decentralized storage.
+
+It takes additional effort to work with customers and their data, but it has the additional advantage of being able to charge your customer for the data being stored. This means an additional revenue stream compared to only storing copies of deals, and earning block rewards.
diff --git a/storage-providers/skills/security.md b/storage-providers/skills/security.md
new file mode 100644
index 000000000..f543a801e
--- /dev/null
+++ b/storage-providers/skills/security.md
@@ -0,0 +1,43 @@
+---
+description: >-
+ This page covers the importance of security for Filecoin storage providers,
+ including the need to mitigate potential security threats and implement
+ appropriate security controls.
+---
+
+# Security
+
+Being a Filecoin storage provider involves more than just storing customer data. You are also responsible for managing Filecoin wallets and running systems that require 24/7 uptime to avoid losing collateral. This means that if your network or systems are compromised due to a security intrusion, you risk experiencing downtime or even losing access to your systems and storage. Therefore, maintaining proper security is of utmost importance.
+
+As a storage provider, you must have the necessary skills and expertise to identify and mitigate potential security threats. This includes understanding common attack vectors such as phishing, malware, and social engineering. On top of that, you must be proficient at implementing appropriate security controls such as firewalls, intrusion detection and prevention systems, and access controls.
+
+Additionally, you must also be able to keep up with the latest security trends and technologies to ensure that your systems remain secure over time. This can involve ongoing training and education, as well as staying informed about new threats and vulnerabilities.
+
+In summary, as a Filecoin storage provider, you have a responsibility to ensure the security of your customers’ data, your own systems, and the Filecoin network as a whole. This requires a thorough understanding of security best practices, ongoing training and education, and a commitment to staying informed about the latest security trends and technologies.
+
+## Network security
+
+When it comes to network security, it is important to have a solid first line of defense in place. One effective strategy is to implement a redundant firewall setup that can filter incoming traffic as well as traffic between your VLANs.
+
+A next-generation firewall (NGFW) can provide even more robust security by incorporating an intrusion prevention system (IPS) at the network perimeter. This can help to detect and prevent potential threats before they can do any harm.
+
+However, it is important to note that implementing an NGFW with IPS enabled can also have an impact on your internet bandwidth. This is because the IPS will inspect all incoming and outgoing traffic, which can slow down your network performance. As such, it is important to carefully consider your bandwidth requirements and plan accordingly.
+
+## System security
+
+A second layer of defense is system security. There are multiple concepts that contribute to good system security:
+
+* Host-based firewall (UFW)
+
+  Implement a host-based firewall on your systems, such as UFW (Uncomplicated Firewall) on Ubuntu, which is `iptables` based.
+* SELinux
+
+ Linux comes with an additional security implementation called `SELinux` (Security Enhanced Linux). Most system administrators will not implement this by default because it takes additional consideration and administration. Once activated though it offers the highest grade of process and user isolation possible on Linux and contributes greatly to better security.
+* Not running as root
+
+ It is a common mistake to run processes or containers as `root`. This is a serious security risk because any attacker who compromises a service running as root automatically obtains root privileges on that system.
+
+ Lotus software does not require root privileges and therefore should run under a normal account (such as a service account, for instance called `lotus`) on the system.
+* Privilege escalation
+
+ Since it is not required that Lotus runs as root, it is also not required for the service account to have privilege escalation. This means you should not allow the `lotus` account to use `sudo`.
diff --git a/storage-providers/skills/storage.md b/storage-providers/skills/storage.md
new file mode 100644
index 000000000..c5ab00b53
--- /dev/null
+++ b/storage-providers/skills/storage.md
@@ -0,0 +1,55 @@
+---
+description: >-
+ This content covers various aspects related to storage in the context of being
+ a Filecoin storage provider.
+---
+
+# Storage
+
+Storage is a critical component of running a successful storage provider in the Filecoin network. While it may seem obvious that having strong storage skills is important, Filecoin requires a unique end-to-end skill set to run a 24/7 application.
+
+[Storage proving](../filecoin-economics/storage-proving.md) requires atypical read-behavior from a storage system. This means that the storage administrator must be able to design for this behavior and analyze the storage system accordingly.
+
+In addition, it is important for storage providers to understand the importance of reliable and efficient storage. Filecoin is designed to incentivize storage providers to keep data safe and secure, and as such, the storage system must be able to maintain high levels of reliability and availability.
+
+Storage providers need to be able to implement and maintain storage infrastructure that meets the needs of clients who require large amounts of storage space. This requires knowledge of various storage technologies, as well as the ability to troubleshoot issues that may arise.
+
+Overall, storage is a critical aspect of the Filecoin network and storage providers must have the necessary skills and knowledge to provide high-quality storage services to clients.
+
+## ZFS
+
+Zettabyte File System (ZFS) is a combined file system and logical volume manager that provides advanced features such as pooled storage, data integrity verification and automatic repair, and data compression. It is a popular choice among storage providers due to its reliability, scalability, and performance.
+
+Configuring ZFS requires knowledge and skills that go beyond the basics of traditional file systems. As a storage provider you need to understand how ZFS manages data, including how it distributes data across disks and how it handles data redundancy and data protection. You must also know how to configure ZFS for optimal performance and how to troubleshoot issues that may arise with ZFS.
+
+In addition to configuring ZFS, storage providers must also be able to manage the disks and other hardware used for storage. This includes selecting and purchasing appropriate hardware, installing and configuring disks and disk controllers, and monitoring disk health and performance.
+
+Having the knowledge and skills to configure ZFS is crucial as a storage provider, as it enables you to provide reliable and high-performance storage services to your clients. Without this expertise, you may struggle to deliver the level of service that your clients expect, which could lead to decreased customer satisfaction and loss of business.
+
+### RAIDZ2
+
+ZFS is a combined file system and volume manager, designed to work efficiently on large-scale storage systems. One of the unique features of ZFS is its built-in support for various types of RAID configurations, which makes it an ideal choice for data storage in a Filecoin network.
+
+As a storage provider, it is crucial to have knowledge and skills in configuring ZFS. This includes understanding how to create virtual devices (VDEVs), which are the building blocks of ZFS storage pools. A VDEV can be thought of as a group of physical devices, such as hard disks, solid-state drives, or even virtual disks, that are used to store data.
+
+In addition, storage providers must also understand how wide VDEVs should ideally be, and how to create storage pools with a specific RAID protection level. RAID is a method of protecting data by distributing it across multiple disks in a way that allows for redundancy and fault tolerance. ZFS has its own types of RAID, known as RAID-Z, which come in different levels of protection.
+
+For example, `RAIDZ2` is a configuration that provides double parity, meaning that two disks can fail simultaneously without data loss. As a storage provider, it is important to understand how to create storage pools with the appropriate level of RAID protection to ensure data durability.
+
+Finally, creating datasets is another important aspect of ZFS configuration. Datasets are logical partitions within a ZFS storage pool that can have their own settings and attributes, such as compression, encryption, and quota. As a storage provider, it is necessary to understand how to create datasets to effectively manage storage and optimize performance.
+
+### Snapshots and replication
+
+ZFS provides built-in protection for data in the form of snapshots. Snapshots are read-only copies of a ZFS file system at a particular point in time. By taking regular snapshots, you can protect your data against accidental deletions, file corruption, or other disasters.
+
+To ensure that your data is fully protected, it is important to configure a snapshot rotation schema. This means defining a schedule for taking snapshots and retaining them for a specified period of time. For example, you might take hourly snapshots and retain them for 24 hours, and then take daily snapshots and retain them for a week.
+
+In addition to snapshots, ZFS also allows you to replicate them to another system running ZFS. This can be useful for creating backups or for replicating data to a remote site for disaster recovery purposes. ZFS replication works by sending incremental changes to the destination system, which ensures that only the changes are sent over the network, rather than the entire dataset. This can significantly reduce the amount of data that needs to be transferred and can help minimize network bandwidth usage.
+
+## Performance analysis
+
+As a storage provider, it is crucial to be able to troubleshoot and resolve any performance issues that may arise. This requires a deep understanding of the underlying storage system and the ability to use Linux performance analytic tools such as `iostat`. These tools can help identify potential bottlenecks in the storage system, such as high disk utilization or slow response times.
+
+In addition to troubleshooting, you must also be able to optimize the performance of your storage system. One way to improve performance is by implementing an NVMe write-cache. NVMe is a protocol designed specifically for solid-state drives, which can greatly improve the speed of write operations. By adding an NVMe write-cache to the storage system, you can reduce the latency of write operations and improve overall system performance.
+
+Read-cache on the other hand is typically not useful in the context of Filecoin. This is because sealed sectors are read very randomly, and unsealed sectors will typically not be read twice. Therefore, storing data in a read-cache would be redundant and add unnecessary overhead to the system.
diff --git a/storage-providers/storage-providers.md b/storage-providers/storage-providers.md
new file mode 100644
index 000000000..33e112a90
--- /dev/null
+++ b/storage-providers/storage-providers.md
@@ -0,0 +1,2 @@
+# Storage providers
+